From 0661844da7a00af983315fe1c70c52c8cce0f8b3 Mon Sep 17 00:00:00 2001 From: Cameron Kerr Date: Mon, 16 Feb 2015 11:55:16 +1300 Subject: [PATCH 01/48] Added some build artifacts and cscope to .gitignore --- .gitignore | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/.gitignore b/.gitignore index 2a3c2df..5a045e7 100644 --- a/.gitignore +++ b/.gitignore @@ -10,3 +10,8 @@ lib/libipfix.so* libmisc/libmisc.so* probe/ipfix_probe config.h +cscope.* +*.[oa] +10.*.*.* +.depend +config.log From d5e55d8f72da12515380575186817203c780fe19 Mon Sep 17 00:00:00 2001 From: Cameron Kerr Date: Mon, 16 Feb 2015 12:38:32 +1300 Subject: [PATCH 02/48] Converted Netscaler IEs into source format for creating libipfix C structs --- lib/ipfix_NETSCALER_IEs.txt | 121 ++++++++++++++++++++++++++++++++++++ 1 file changed, 121 insertions(+) create mode 100644 lib/ipfix_NETSCALER_IEs.txt diff --git a/lib/ipfix_NETSCALER_IEs.txt b/lib/ipfix_NETSCALER_IEs.txt new file mode 100644 index 0000000..520a0fd --- /dev/null +++ b/lib/ipfix_NETSCALER_IEs.txt @@ -0,0 +1,121 @@ +128, IPFIX_FT_NETSCALER_ROUND_TRIP_TIME, 4, IPFIX_CODING_UINT, "NETSCALER_ROUND_TRIP_TIME", "The TCP RTT of the flow in milliseconds since the time last record was sent" +129, IPFIX_FT_NETSCALER_TRANSACTION_ID, 4, IPFIX_CODING_UINT, "NETSCALER_TRANSACTION_ID", "At Layer-7, the four flows of a transaction between client and server (client-to-NS, NS-to-Server, Server-to-NS, NS-to-Client) are tied together using the transaction ID." +130, IPFIX_FT_NETSCALER_HTTP_REQ_URL, 65535, IPFIX_CODING_STRING, "NETSCALER_HTTP_REQ_URL", "HTTP request URL" +131, IPFIX_FT_NETSCALER_HTTP_REQ_COOKIE, 65535, IPFIX_CODING_STRING, "NETSCALER_HTTP_REQ_COOKIE", "Value of Cookie header present in HTTP request" +132, IPFIX_FT_NETSCALER_FLOW_FLAGS, 8, IPFIX_CODING_UINT, "NETSCALER_FLOW_FLAGS", "application layer flags, for use between the exporter and collector to indicate various Layer-7 events and types like the direction of the flow (client-in, svc-out, etc.), http version, NetScaler cache served responses, SSL, compression, TCP buffering, and many more." 
+133, IPFIX_FT_NETSCALER_CONNECTION_ID, 4, IPFIX_CODING_UINT, "NETSCALER_CONNECTION_ID", "The two flows of a TCP connection are tied together with a connection ID" +134, IPFIX_FT_NETSCALER_SYSLOG_PRIORITY, 1, IPFIX_CODING_UINT, "NETSCALER_SYSLOG_PRIORITY", "Priority of the syslog message being logged" +135, IPFIX_FT_NETSCALER_SYSLOG_MESSAGE, 65535, IPFIX_CODING_STRING, "NETSCALER_SYSLOG_MESSAGE", "The syslog message generated on Netscaler" +136, IPFIX_FT_NETSCALER_SYSLOG_TIMESTAMP, 8, IPFIX_CODING_UINT, "NETSCALER_SYSLOG_TIMESTAMP", "Timestamp when the syslog (contained in the syslog record) was generated Number of milliseconds since Unix epoch" +140, IPFIX_FT_NETSCALER_HTTP_REQ_REFERER, 65535, IPFIX_CODING_STRING, "NETSCALER_HTTP_REQ_REFERER", "Value of Referer header present in HTTP request" +141, IPFIX_FT_NETSCALER_HTTP_REQ_METHOD, 65535, IPFIX_CODING_STRING, "NETSCALER_HTTP_REQ_METHOD", "The request method in HTTP request" +142, IPFIX_FT_NETSCALER_HTTP_REQ_HOST, 65535, IPFIX_CODING_STRING, "NETSCALER_HTTP_REQ_HOST", "Value of Host header in HTTP request" +143, IPFIX_FT_NETSCALER_HTTP_REQ_USER_AGENT, 65535, IPFIX_CODING_STRING, "NETSCALER_HTTP_REQ_USER_AGENT", "The User Agent string as seen in HTTP request header" +144, IPFIX_FT_NETSCALER_HTTP_RSP_STATUS, 2, IPFIX_CODING_UINT, "NETSCALER_HTTP_RSP_STATUS", "Status of HTTP response" +145, IPFIX_FT_NETSCALER_HTTP_RSP_LEN, 8, IPFIX_CODING_UINT, "NETSCALER_HTTP_RSP_LEN", "The total size of HTTP response" +146, IPFIX_FT_NETSCALER_SERVER_TTFB, 8, IPFIX_CODING_UINT, "NETSCALER_SERVER_TTFB", "Time elapsed in microseconds between receiving of request from the client and receiving the first byte of response from server" +147, IPFIX_FT_NETSCALER_SERVER_TTLB, 8, IPFIX_CODING_UINT, "NETSCALER_SERVER_TTLB", "Time elapsed in microseconds between receiving of request from the client and receiving the last byte of response from server" +150, IPFIX_FT_NETSCALER_APP_NAME_INCARNATION_NUMBER, 4, IPFIX_CODING_UINT, "NETSCALER_APP_NAME_INCARNATION_NUMBER", "Each named entity in the NetScaler is associated with an id. The name to id mapping is sent in the appname mapping template to the collector. Other records only contain the id of the entity and the corresponding name is stored by the collector and used when required. If a new entity gets added or an entity gets removed or modified, the incarnation number changes which indicates the collector to use the appname mapping record to update its database" +151, IPFIX_FT_NETSCALER_APP_NAME_APP_ID, 4, IPFIX_CODING_UINT, "NETSCALER_APP_NAME_APP_ID", "The id of a named entity" +152, IPFIX_FT_NETSCALER_APP_NAME, 65535, IPFIX_CODING_STRING, "NETSCALER_APP_NAME", "Name of the entity configured on Netscaler for which the name-to-id mapping is being sent in the current record" +153, IPFIX_FT_NETSCALER_HTTP_REQ_RCV_FB, 8, IPFIX_CODING_HEX, "NETSCALER_HTTP_REQ_RCV_FB", "Timestamp when the first byte of request was received from client at the NetScaler. Uses an NTP format of date/time" +156, IPFIX_FT_NETSCALER_HTTP_REQ_FORW_FB, 8, IPFIX_CODING_HEX, "NETSCALER_HTTP_REQ_FORW_FB", "Timestamp when the first byte of request was forwarded to server from the NetScaler. Uses an NTP format of date/time" +157, IPFIX_FT_NETSCALER_HTTP_RES_RCV_FB, 8, IPFIX_CODING_HEX, "NETSCALER_HTTP_RES_RCV_FB", "Timestamp when the first byte of response was received from server at the NetScaler. 
Uses an NTP format of date/time" +158, IPFIX_FT_NETSCALER_HTTP_RES_FORW_FB, 8, IPFIX_CODING_HEX, "NETSCALER_HTTP_RES_FORW_FB", "Timestamp when the first byte of response was forwarded to client from the NetScaler. Uses an NTP format of date/time" +159, IPFIX_FT_NETSCALER_HTTP_REQ_RCV_LB, 8, IPFIX_CODING_HEX, "NETSCALER_HTTP_REQ_RCV_LB", "Timestamp when the last byte of request was received from client at the NetScaler. Uses an NTP format of date/time" +160, IPFIX_FT_NETSCALER_HTTP_REQ_FORW_LB, 8, IPFIX_CODING_HEX, "NETSCALER_HTTP_REQ_FORW_LB", "Uses an NTP format of date/time" +161, IPFIX_FT_NETSCALER_MAIN_PAGE_ID, 4, IPFIX_CODING_UINT, "NETSCALER_MAIN_PAGE_ID", "In a html page, the main page transaction is associated with all its embedded object transactions. Each such embedded object transaction record contains the transaction Id of the main page so that a parent link to the main transaction can be created. This is used in generating a waterfall chart model depicting the various timing information of the entire page loading." +162, IPFIX_FT_NETSCALER_MAIN_PAGE_COREID, 4, IPFIX_CODING_UINT, "NETSCALER_MAIN_PAGE_COREID", "The above transaction ID is unique within the process. Hence the exporting process ID of the main page is also required." +163, IPFIX_FT_NETSCALER_HTTP_CLIENT_INTERACTION_START_TIME, 65535, IPFIX_CODING_STRING, "NETSCALER_HTTP_CLIENT_INTERACTION_START_TIME", "The timestamp when the page starts loading" +164, IPFIX_FT_NETSCALER_HTTP_CLIENT_RENDER_END_TIME, 65535, IPFIX_CODING_STRING, "NETSCALER_HTTP_CLIENT_RENDER_END_TIME", "The timestamp when the page completely renders" +165, IPFIX_FT_NETSCALER_HTTP_CLIENT_RENDER_START_TIME, 65535, IPFIX_CODING_STRING, "NETSCALER_HTTP_CLIENT_RENDER_START_TIME", "The timestamp when page rendering begins" +167, IPFIX_FT_NETSCALER_APP_TEMPLATE_NAME, 65535, IPFIX_CODING_STRING, "NETSCALER_APP_TEMPLATE_NAME", "Name of the template to which the current entity belongs (see netscalerAppTemplateID)" +168, IPFIX_FT_NETSCALER_HTTP_CLIENT_INTERACTION_END_TIME, 65535, IPFIX_CODING_STRING, "NETSCALER_HTTP_CLIENT_INTERACTION_END_TIME", "The NTP timestamp when the HTML page becomes interactive to the user" +169, IPFIX_FT_NETSCALER_HTTP_RES_RCV_LB, 8, IPFIX_CODING_HEX, "NETSCALER_HTTP_RES_RCV_LB", "Uses an NTP format of date/time" +170, IPFIX_FT_NETSCALER_HTTP_RES_FORW_LB, 8, IPFIX_CODING_HEX, "NETSCALER_HTTP_RES_FORW_LB", "Uses an NTP format of date/time" +171, IPFIX_FT_NETSCALER_APP_UNIT_NAME_APP_ID, 4, IPFIX_CODING_UINT, "NETSCALER_APP_UNIT_NAME_APP_ID", "Netscaler uses application templates that groups a set of entities which can be exported and imported when needed. This Information Element exportes the ID of the application template to which the entity belongs." 
+172, IPFIX_FT_NETSCALER_DB_LOGIN_FLAGS, 4, IPFIX_CODING_UINT, "NETSCALER_DB_LOGIN_FLAGS", "SQL login flags" +173, IPFIX_FT_NETSCALER_DB_REQ_TYPE, 1, IPFIX_CODING_UINT, "NETSCALER_DB_REQ_TYPE", "The type of database request" +174, IPFIX_FT_NETSCALER_DB_PROTOCOL_NAME, 1, IPFIX_CODING_UINT, "NETSCALER_DB_PROTOCOL_NAME", "The database protocol being used" +175, IPFIX_FT_NETSCALER_DB_USER_NAME, 65535, IPFIX_CODING_STRING, "NETSCALER_DB_USER_NAME", "Database username" +176, IPFIX_FT_NETSCALER_DB_DATABASE_NAME, 65535, IPFIX_CODING_STRING, "NETSCALER_DB_DATABASE_NAME", "DB database name" +177, IPFIX_FT_NETSCALER_DB_CLIENT_HOST_NAME, 65535, IPFIX_CODING_STRING, "NETSCALER_DB_CLIENT_HOST_NAME", "DB client host name" +178, IPFIX_FT_NETSCALER_DB_REQ_STRING, 65535, IPFIX_CODING_STRING, "NETSCALER_DB_REQ_STRING", "DB request string" +179, IPFIX_FT_NETSCALER_DB_RESP_STATUS_STRING, 65535, IPFIX_CODING_STRING, "NETSCALER_DB_RESP_STATUS_STRING", "Status of the response as indicated in the Database response" +180, IPFIX_FT_NETSCALER_DB_RESP_STATUS, 8, IPFIX_CODING_UINT, "NETSCALER_DB_RESP_STATUS", "SQL response status" +181, IPFIX_FT_NETSCALER_DB_RESP_LENGTH, 8, IPFIX_CODING_UINT, "NETSCALER_DB_RESP_LENGTH", "SQL response length" +182, IPFIX_FT_NETSCALER_CLIENT_RTT, 4, IPFIX_CODING_UINT, "NETSCALER_CLIENT_RTT", "The RTT of the client is exported in the server side records" +183, IPFIX_FT_NETSCALER_HTTP_CONTENT_TYPE, 65535, IPFIX_CODING_STRING, "NETSCALER_HTTP_CONTENT_TYPE", "The Content Type string as seen in the HTTP header" +185, IPFIX_FT_NETSCALER_HTTP_REQ_AUTHORIZATION, 65535, IPFIX_CODING_STRING, "NETSCALER_HTTP_REQ_AUTHORIZATION", "Value of the Authorization HTTP header" +186, IPFIX_FT_NETSCALER_HTTP_REQ_VIA, 65535, IPFIX_CODING_STRING, "NETSCALER_HTTP_REQ_VIA", "Value of the Via HTTP header" +187, IPFIX_FT_NETSCALER_HTTP_RES_LOCATION, 65535, IPFIX_CODING_STRING, "NETSCALER_HTTP_RES_LOCATION", "Value of the Location HTTP response header" +188, IPFIX_FT_NETSCALER_HTTP_RES_SET_COOKIE, 65535, IPFIX_CODING_STRING, "NETSCALER_HTTP_RES_SET_COOKIE", "value of the Set-Cookie HTTP response header" +189, IPFIX_FT_NETSCALER_HTTP_RES_SET_COOKIE2, 65535, IPFIX_CODING_STRING, "NETSCALER_HTTP_RES_SET_COOKIE2", "value of the Set-Cookie2 HTTP response header" +190, IPFIX_FT_NETSCALER_HTTP_REQ_X_FORWARDED_FOR, 65535, IPFIX_CODING_STRING, "NETSCALER_HTTP_REQ_X_FORWARDED_FOR", "value of the X-Forwarded-For HTTP header" +192, IPFIX_FT_NETSCALER_CONNECTION_CHAIN_ID, 65535, IPFIX_CODING_BYTES, "NETSCALER_CONNECTION_CHAIN_ID", "This is a 16-byte ID that ties together all the TCP connections of ICA protocol, from client to the server that are terminated and established on layer-4 devices in the path. Since all these TCP connections belong to one logical connection from client to server, they will have the same connection chain ID." +193, IPFIX_FT_NETSCALER_CONNECTION_CHAIN_HOP_COUNT, 1, IPFIX_CODING_UINT, "NETSCALER_CONNECTION_CHAIN_HOP_COUNT", "The hop count of the current device in the connection chain from client to server (see connection chain id for more details)" +200, IPFIX_FT_NETSCALER_ICA_SESSION_GUID, 65535, IPFIX_CODING_BYTES, "NETSCALER_ICA_SESSION_GUID", "This is a 16-byte ID that identifies an ICA session. This value is present in the ICA protocol header. With Excalibur actual session GUID will be present, which indicates a unique session established by a user. Pre-Excalibur, this is a random Value generated by Netscaler." 
+201, IPFIX_FT_NETSCALE_ICA_CLIENT_VERSION, 65535, IPFIX_CODING_STRING, "NETSCALE_ICA_CLIENT_VERSION", "Version of the ICA client" +202, IPFIX_FT_NETSCALER_ICA_CLIENT_TYPE, 2, IPFIX_CODING_UINT, "NETSCALER_ICA_CLIENT_TYPE", "Identifies the type of ICA client" +203, IPFIX_FT_NETSCALER_ICA_CLIENT_IP, 4, IPFIX_CODING_IP, "NETSCALER_ICA_CLIENT_IP", "The ICA client IP as sent by the Citrix Receiver" +204, IPFIX_FT_NETSCALER_ICA_CLIENT_HOSTNAME, 65535, IPFIX_CODING_STRING, "NETSCALER_ICA_CLIENT_HOSTNAME", "name of the ICA client host" +205, IPFIX_FT_NETSCALER_AAA_USERNAME, 65535, IPFIX_CODING_STRING, "NETSCALER_AAA_USERNAME", "If the connection is over VPN, the AAA username for the session" +207, IPFIX_FT_NETSCALER_ICA_DOMAIN_NAME, 65535, IPFIX_CODING_STRING, "NETSCALER_ICA_DOMAIN_NAME", "Domain of the ICA client" +208, IPFIX_FT_NETSCALER_ICA_CLIENT_LAUNCHER, 2, IPFIX_CODING_UINT, "NETSCALER_ICA_CLIENT_LAUNCHER", "Identifies the ICA launcher" +209, IPFIX_FT_NETSCALER_ICA_SESSION_SETUP_TIME, 4, IPFIX_CODING_UINT, "NETSCALER_ICA_SESSION_SETUP_TIME", "Number of seconds since Unix epoch (usually...?)" +210, IPFIX_FT_NETSCALER_ICA_SERVER_NAME, 65535, IPFIX_CODING_STRING, "NETSCALER_ICA_SERVER_NAME", "name of the ICA server" +214, IPFIX_FT_NETSCALER_ICA_SESSION_RECONNECTS, 1, IPFIX_CODING_UINT, "NETSCALER_ICA_SESSION_RECONNECTS", "Number of times session reconnects happened" +215, IPFIX_FT_NETSCALER_ICA_RTT, 4, IPFIX_CODING_UINT, "NETSCALER_ICA_RTT", "The ICA client sends a probe packet to the server, which sends back a response. Using this, the ICA process calculates the round trip time between the client and server which is exported to the appflow collector as ICA RTT." +216, IPFIX_FT_NETSCALER_ICA_CLIENT_SIDE_RX_BYTES, 4, IPFIX_CODING_UINT, "NETSCALER_ICA_CLIENT_SIDE_RX_BYTES", "Number of bytes received on client ICA connection" +217, IPFIX_FT_NETSCALER_ICA_CLIENT_SIDE_TX_BYTES, 4, IPFIX_CODING_UINT, "NETSCALER_ICA_CLIENT_SIDE_TX_BYTES", "Number of bytes transmitted on client ICA connection" +219, IPFIX_FT_NETSCALER_ICA_CLIENT_SIDE_PACKETS_RETRANSMIT, 2, IPFIX_CODING_UINT, "NETSCALER_ICA_CLIENT_SIDE_PACKETS_RETRANSMIT", "Number of packets retransmitted on clientside connection" +220, IPFIX_FT_NETSCALER_ICA_SERVER_SIDE_PACKETS_RETRANSMIT, 2, IPFIX_CODING_UINT, "NETSCALER_ICA_SERVER_SIDE_PACKETS_RETRANSMIT", "Number of packets retransmitted on serverside connection" +221, IPFIX_FT_NETSCALER_ICA_CLIENT_SIDE_RTT, 4, IPFIX_CODING_UINT, "NETSCALER_ICA_CLIENT_SIDE_RTT", "The TCP rtt on the client ICA connection" +222, IPFIX_FT_NETSCALER_ICA_SERVER_SIDE_RTT, 4, IPFIX_CODING_UINT, "NETSCALER_ICA_SERVER_SIDE_RTT", "The TCP rtt on the server ICA connection" +223, IPFIX_FT_NETSCALER_ICA_SESSION_UPDATE_BEGIN_SEC, 4, IPFIX_CODING_UINT, "NETSCALER_ICA_SESSION_UPDATE_BEGIN_SEC", "Absolute timestamp of end of ICA session update Number of seconds since Unix epoch (usually...?)" +224, IPFIX_FT_NETSCALER_ICA_SESSION_UPDATE_END_SEC, 4, IPFIX_CODING_UINT, "NETSCALER_ICA_SESSION_UPDATE_END_SEC", "Absolute timestamp of beginning of ICA session update Number of seconds since Unix epoch (usually...?)" +225, IPFIX_FT_NETSCALER_ICA_CHANNEL_ID_1, 4, IPFIX_CODING_UINT, "NETSCALER_ICA_CHANNEL_ID_1", "The IDs of the ICA channels opened" +226, IPFIX_FT_NETSCALER_ICA_CHANNEL_ID_1_BYTES, 4, IPFIX_CODING_UINT, "NETSCALER_ICA_CHANNEL_ID_1_BYTES", "The IDs of the ICA channels opened" +227, IPFIX_FT_NETSCALER_ICA_CHANNEL_ID_2, 4, IPFIX_CODING_UINT, "NETSCALER_ICA_CHANNEL_ID_2", "The IDs of the ICA channels opened" +228, 
IPFIX_FT_NETSCALER_ICA_CHANNEL_ID_2_BYTES, 4, IPFIX_CODING_UINT, "NETSCALER_ICA_CHANNEL_ID_2_BYTES", "The IDs of the ICA channels opened" +229, IPFIX_FT_NETSCALER_ICA_CHANNEL_ID_3, 4, IPFIX_CODING_UINT, "NETSCALER_ICA_CHANNEL_ID_3", "The IDs of the ICA channels opened" +230, IPFIX_FT_NETSCALER_ICA_CHANNEL_ID_3_BYTES, 4, IPFIX_CODING_UINT, "NETSCALER_ICA_CHANNEL_ID_3_BYTES", "The IDs of the ICA channels opened" +231, IPFIX_FT_NETSCALER_ICA_CHANNEL_ID_4, 4, IPFIX_CODING_UINT, "NETSCALER_ICA_CHANNEL_ID_4", "The IDs of the ICA channels opened" +232, IPFIX_FT_NETSCALER_ICA_CHANNEL_ID_4_BYTES, 4, IPFIX_CODING_UINT, "NETSCALER_ICA_CHANNEL_ID_4_BYTES", "The IDs of the ICA channels opened" +233, IPFIX_FT_NETSCALER_ICA_CHANNEL_ID_5, 4, IPFIX_CODING_UINT, "NETSCALER_ICA_CHANNEL_ID_5", "The IDs of the ICA channels opened" +234, IPFIX_FT_NETSCALER_ICA_CHANNEL_ID_5_BYTES, 4, IPFIX_CODING_UINT, "NETSCALER_ICA_CHANNEL_ID_5_BYTES", "The IDs of the ICA channels opened" +235, IPFIX_FT_NETSCALER_ICA_CONNECTION_PRIORITY, 2, IPFIX_CODING_UINT, "NETSCALER_ICA_CONNECTION_PRIORITY", "Identifies the priority of ICA connection" +236, IPFIX_FT_NETSCALER_APPLICATION_STARTUP_DURATION, 4, IPFIX_CODING_UINT, "NETSCALER_APPLICATION_STARTUP_DURATION", "The time elapsed between the launch of an application and when it started running" +237, IPFIX_FT_NETSCALER_ICA_LAUNCH_MECHANISM, 2, IPFIX_CODING_UINT, "NETSCALER_ICA_LAUNCH_MECHANISM", "The mechanism used to launch ICA applicaiton" +238, IPFIX_FT_NETSCALER_ICA_APPLICATION_NAME, 65535, IPFIX_CODING_STRING, "NETSCALER_ICA_APPLICATION_NAME", "ICA application name" +239, IPFIX_FT_NETSCALER_APPLICATION_STARTUP_TIME, 4, IPFIX_CODING_UINT, "NETSCALER_APPLICATION_STARTUP_TIME", "The time when an application started on the server Number of seconds since Unix epoch (usually...?)" +240, IPFIX_FT_NETSCALER_ICA_APPLICATION_TERMINATION_TYPE, 2, IPFIX_CODING_UINT, "NETSCALER_ICA_APPLICATION_TERMINATION_TYPE", "Indicates how the application termination happened, eg: User closed the app, session termination, abort etc" +241, IPFIX_FT_NETSCALER_ICA_APPLICATION_TERMINATION_TIME, 4, IPFIX_CODING_UINT, "NETSCALER_ICA_APPLICATION_TERMINATION_TIME", "The time when the application was terminated Number of seconds since Unix epoch (usually...?)" +242, IPFIX_FT_NETSCALER_ICA_SESSION_END_TIME, 4, IPFIX_CODING_UINT, "NETSCALER_ICA_SESSION_END_TIME", "The time when the ICA session ended Number of seconds since Unix epoch (usually...?)" +243, IPFIX_FT_NETSCALER_ICA_CLIENT_SIDE_JITTER, 4, IPFIX_CODING_UINT, "NETSCALER_ICA_CLIENT_SIDE_JITTER", "The variance of client side RTT w.r.t the calculated RTT" +244, IPFIX_FT_NETSCALER_ICA_SERVER_SIDE_JITTER, 4, IPFIX_CODING_UINT, "NETSCALER_ICA_SERVER_SIDE_JITTER", "The variance of server side RTT w.r.t the calculated RTT" +245, IPFIX_FT_NETSCALER_ICA_APP_PROCESS_ID, 4, IPFIX_CODING_UINT, "NETSCALER_ICA_APP_PROCESS_ID", "The process ID of the application launched on the server" +246, IPFIX_FT_NETSCALER_ICA_APP_MODULE_PATH, 65535, IPFIX_CODING_STRING, "NETSCALER_ICA_APP_MODULE_PATH", "path of the ICA application being launched" +247, IPFIX_FT_NETSCALER_ICA_DEVICE_SERIAL_NO, 4, IPFIX_CODING_UINT, "NETSCALER_ICA_DEVICE_SERIAL_NO", "Used in conjunction with clientcookie to identify primary connection and tie up streams of a MSI connection" +248, IPFIX_FT_NETSCALER_MSI_CLIENT_COOKIE, 65535, IPFIX_CODING_BYTES, "NETSCALER_MSI_CLIENT_COOKIE", "An identifier that helps to tie up multiple connections of the same session when Multi stream ICA is used. 
Should be same across all MSI connections" +249, IPFIX_FT_NETSCALER_ICA_FLAGS, 8, IPFIX_CODING_UINT, "NETSCALER_ICA_FLAGS", "ICA specific flags: 0x0004 - MSI Enabled on Session; 0x0008 - Secondary Connection; 0x0010 - Seamless session; 0x0020 - Compression disabled explicitly; 0x0040 - Global ICA GUID enabled; 0x0080 - EUEM channel supported" +250, IPFIX_FT_NETSCALER_ICA_USERNAME, 65535, IPFIX_CODING_STRING, "NETSCALER_ICA_USERNAME", "Username for the ICA session" +251, IPFIX_FT_NETSCALER_LICENSE_TYPE, 1, IPFIX_CODING_UINT, "NETSCALER_LICENSE_TYPE", "" +252, IPFIX_FT_NETSCALER_MAX_LICENSE_COUNT, 8, IPFIX_CODING_UINT, "NETSCALER_MAX_LICENSE_COUNT", "" +253, IPFIX_FT_NETSCALER_CURRENT_LICENSE_CONSUMED, 8, IPFIX_CODING_UINT, "NETSCALER_CURRENT_LICENSE_CONSUMED", "" +254, IPFIX_FT_NETSCALER_ICA_NETWORK_UPDATE_START_TIME, 4, IPFIX_CODING_UINT, "NETSCALER_ICA_NETWORK_UPDATE_START_TIME", "A network update record is sent at a defined interval that contains the ICA connection statistics for that interval. This Information Element contains the timestamp when the collection stats in this record began Number of seconds since Unix epoch (usually...?)" +255, IPFIX_FT_NETSCALER_ICA_NETWORK_UPDATE_END_TIME, 4, IPFIX_CODING_UINT, "NETSCALER_ICA_NETWORK_UPDATE_END_TIME", "A network update record is sent at a defined interval that contains the ICA connection statistics for that interval.
This Information Element contains the timestamp when the collection stats in this record ended Number of seconds since Unix epoch (usually...?)" +256, IPFIX_FT_NETSCALER_ICA_CLIENT_SIDE_SRTT, 4, IPFIX_CODING_UINT, "NETSCALER_ICA_CLIENT_SIDE_SRTT", "RTT smoothed over the client side connection by considering one eighth of current RTT and seven eighth of the smoothed RTT since the beginning of the connection" +257, IPFIX_FT_NETSCALER_ICA_SERVER_SIDE_SRTT, 4, IPFIX_CODING_UINT, "NETSCALER_ICA_SERVER_SIDE_SRTT", "RTT smoothed over the server side connection by considering one eighth of current RTT and seven eighth of the smoothed RTT since the beginning of the connection" +258, IPFIX_FT_NETSCALER_ICA_CLIENT_SIDE_DELAY, 4, IPFIX_CODING_UINT, "NETSCALER_ICA_CLIENT_SIDE_DELAY", "Indicates time taken by Netscaler to process this client side packet (NS introduced processing delay)" +259, IPFIX_FT_NETSCALER_ICA_SERVER_SIDE_DELAY, 4, IPFIX_CODING_UINT, "NETSCALER_ICA_SERVER_SIDE_DELAY", "Indicates time taken by Netscaler to process this server side packet (NS introduced processing delay)" +260, IPFIX_FT_NETSCALER_ICA_HOST_DELAY, 4, IPFIX_CODING_UINT, "NETSCALER_ICA_HOST_DELAY", "Indicates a portion of the ICA RTT measurement - time delay introduced at the Host while processing the packet" +261, IPFIX_FT_NETSCALER_ICA_CLIENTSIDE_WINDOW_SIZE, 2, IPFIX_CODING_UINT, "NETSCALER_ICA_CLIENTSIDE_WINDOW_SIZE", "TCP window size on the client connection" +262, IPFIX_FT_NETSCALER_ICA_SERVER_SIDE_WINDOW_SIZE, 2, IPFIX_CODING_UINT, "NETSCALER_ICA_SERVER_SIDE_WINDOW_SIZE", "TCP window size on the server connection" +263, IPFIX_FT_NETSCALER_ICA_CLIENT_SIDE_RTO_COUNT, 2, IPFIX_CODING_UINT, "NETSCALER_ICA_CLIENT_SIDE_RTO_COUNT", "Number of times retransmission timeout occurred on client connection" +264, IPFIX_FT_NETSCALER_ICA_SERVER_SIDE_RTO_COUNT, 2, IPFIX_CODING_UINT, "NETSCALER_ICA_SERVER_SIDE_RTO_COUNT", "Number of times retransmission timeout occurred on server connection" +265, IPFIX_FT_NETSCALER_ICA_L7_CLIENT_LATENCY, 4, IPFIX_CODING_UINT, "NETSCALER_ICA_L7_CLIENT_LATENCY", "L7 layer latency measured using ICA probes and responses sent between Receiver and the Host, on client side pcb." +266, IPFIX_FT_NETSCALER_ICA_L7_SERVER_LATENCY, 4, IPFIX_CODING_UINT, "NETSCALER_ICA_L7_SERVER_LATENCY", "L7 layer latency measured using ICA probes and responses sent between Receiver and the Host, on server side pcb." +267, IPFIX_FT_NETSCALER_HTTP_DOMAIN_NAME, 65535, IPFIX_CODING_STRING, "NETSCALER_HTTP_DOMAIN_NAME", "HTTP domain name" +268, IPFIX_FT_NETSCALER_CACHE_REDIR_CLIENT_CONNECTION_CORE_ID, 4, IPFIX_CODING_UINT, "NETSCALER_CACHE_REDIR_CLIENT_CONNECTION_CORE_ID", "The client connection id is unique within a process. Hence the process id of the client connection is also passed to make the complete set unique." +269, IPFIX_FT_NETSCALER_CACHE_REDIR_CLIENT_CONNECTION_TRANSACTION_ID, 4, IPFIX_CODING_UINT, "NETSCALER_CACHE_REDIR_CLIENT_CONNECTION_TRANSACTION_ID", "When a request hits CR vserver and is redirected to the cache server and a cache miss happens, the cache sends a the request to the origin server. This request mostly comes back to the NS. This ID is used to link the cache request with the actual client request on the collector." From d81061aab5776310662101c8cd7ef249c3f27dde Mon Sep 17 00:00:00 2001 From: Cameron Kerr Date: Mon, 16 Feb 2015 13:26:17 +1300 Subject: [PATCH 03/48] Correct some errors in generated IEs for Netscaler. Link in new assets into build. Fix up missing dependency in Makefile. 
--- lib/.gitignore | 4 + lib/Makefile.in | 11 +- lib/ipfix_NETSCALER_IEs.txt | 243 +++++++++++++------------- lib/make-ipfix_def_netscaler_h.awk | 38 ++++ lib/make-ipfix_fields_netscaler_h.awk | 29 +++ 5 files changed, 203 insertions(+), 122 deletions(-) create mode 100644 lib/.gitignore create mode 100755 lib/make-ipfix_def_netscaler_h.awk create mode 100755 lib/make-ipfix_fields_netscaler_h.awk diff --git a/lib/.gitignore b/lib/.gitignore new file mode 100644 index 0000000..a825a85 --- /dev/null +++ b/lib/.gitignore @@ -0,0 +1,4 @@ +ipfix_def_netscaler.h +ipfix_fields_netscaler.h +ipfix_reverse_fields_netscaler.h + diff --git a/lib/Makefile.in b/lib/Makefile.in index c74b32e..a4fc90b 100644 --- a/lib/Makefile.in +++ b/lib/Makefile.in @@ -41,7 +41,7 @@ CCOPT = -Wall -g INCLS = -I. -I.. -I../libmisc CFLAGS = $(CCOPT) $(INCLS) $(DEFS) -TARGETS = ipfix_reverse_fields.h ipfix_def_fokus.h ipfix_fields_fokus.h libipfix.a libipfix.so +TARGETS = ipfix_reverse_fields.h ipfix_def_fokus.h ipfix_fields_fokus.h ipfix_def_netscaler.h ipfix_fields_netscaler.h ipfix_reverse_fields_netscaler.h libipfix.a libipfix.so SOURCES = ipfix.c ipfix_col.c ipfix_col_db.c ipfix_col_files.c ipfix_print.c OBJECTS = $(SOURCES:.c=.o) @IPFIX_DB_OBJ@ @IPFIX_SSL_OBJ@ DHPARAMS = dh512.pem dh1024.pem @@ -63,9 +63,18 @@ dhparams.c: $(DHPARAMS) $(OPENSSL) dh -noout -C < dh512.pem >> $@ $(OPENSSL) dh -noout -C < dh1024.pem >> $@ + +ipfix.c: ipfix_reverse_fields.h + ipfix_reverse_fields.h: ipfix_fields.h make-reverse-IPFIX_FIELDS_H.sed-script-file sed -f make-reverse-IPFIX_FIELDS_H.sed-script-file $< > $@ ipfix_%_fokus.h: ipfix_FOKUS_IEs.txt make-ipfix_%_fokus_h.awk awk -f make-ipfix_$*_fokus_h.awk $< > $@ +ipfix_%_netscaler.h: ipfix_NETSCALER_IEs.txt make-ipfix_%_netscaler_h.awk + awk -f make-ipfix_$*_netscaler_h.awk $< > $@ + +ipfix_reverse_%_netscaler.h: ipfix_%_netscaler.h make-ipfix_%_netscaler_h.awk + sed -f make-reverse-IPFIX_FIELDS_H.sed-script-file $< > $@ + diff --git a/lib/ipfix_NETSCALER_IEs.txt b/lib/ipfix_NETSCALER_IEs.txt index 520a0fd..ef937b5 100644 --- a/lib/ipfix_NETSCALER_IEs.txt +++ b/lib/ipfix_NETSCALER_IEs.txt @@ -1,121 +1,122 @@ -128, IPFIX_FT_NETSCALER_ROUND_TRIP_TIME, 4, IPFIX_CODING_UINT, "NETSCALER_ROUND_TRIP_TIME", "The TCP RTT of the flow in milliseconds since the time last record was sent" -129, IPFIX_FT_NETSCALER_TRANSACTION_ID, 4, IPFIX_CODING_UINT, "NETSCALER_TRANSACTION_ID", "At Layer-7, the four flows of a transaction between client and server (client-to-NS, NS-to-Server, Server-to-NS, NS-to-Client) are tied together using the transaction ID." -130, IPFIX_FT_NETSCALER_HTTP_REQ_URL, 65535, IPFIX_CODING_STRING, "NETSCALER_HTTP_REQ_URL", "HTTP request URL" -131, IPFIX_FT_NETSCALER_HTTP_REQ_COOKIE, 65535, IPFIX_CODING_STRING, "NETSCALER_HTTP_REQ_COOKIE", "Value of Cookie header present in HTTP request" -132, IPFIX_FT_NETSCALER_FLOW_FLAGS, 8, IPFIX_CODING_UINT, "NETSCALER_FLOW_FLAGS", "application layer flags, for use between the exporter and collector to indicate various Layer-7 events and types like the direction of the flow (client-in, svc-out, etc.), http version, NetScaler cache served responses, SSL, compression, TCP buffering, and many more." 
-133, IPFIX_FT_NETSCALER_CONNECTION_ID, 4, IPFIX_CODING_UINT, "NETSCALER_CONNECTION_ID", "The two flows of a TCP connection are tied together with a connection ID" -134, IPFIX_FT_NETSCALER_SYSLOG_PRIORITY, 1, IPFIX_CODING_UINT, "NETSCALER_SYSLOG_PRIORITY", "Priority of the syslog message being logged" -135, IPFIX_FT_NETSCALER_SYSLOG_MESSAGE, 65535, IPFIX_CODING_STRING, "NETSCALER_SYSLOG_MESSAGE", "The syslog message generated on Netscaler" -136, IPFIX_FT_NETSCALER_SYSLOG_TIMESTAMP, 8, IPFIX_CODING_UINT, "NETSCALER_SYSLOG_TIMESTAMP", "Timestamp when the syslog (contained in the syslog record) was generated Number of milliseconds since Unix epoch" -140, IPFIX_FT_NETSCALER_HTTP_REQ_REFERER, 65535, IPFIX_CODING_STRING, "NETSCALER_HTTP_REQ_REFERER", "Value of Referer header present in HTTP request" -141, IPFIX_FT_NETSCALER_HTTP_REQ_METHOD, 65535, IPFIX_CODING_STRING, "NETSCALER_HTTP_REQ_METHOD", "The request method in HTTP request" -142, IPFIX_FT_NETSCALER_HTTP_REQ_HOST, 65535, IPFIX_CODING_STRING, "NETSCALER_HTTP_REQ_HOST", "Value of Host header in HTTP request" -143, IPFIX_FT_NETSCALER_HTTP_REQ_USER_AGENT, 65535, IPFIX_CODING_STRING, "NETSCALER_HTTP_REQ_USER_AGENT", "The User Agent string as seen in HTTP request header" -144, IPFIX_FT_NETSCALER_HTTP_RSP_STATUS, 2, IPFIX_CODING_UINT, "NETSCALER_HTTP_RSP_STATUS", "Status of HTTP response" -145, IPFIX_FT_NETSCALER_HTTP_RSP_LEN, 8, IPFIX_CODING_UINT, "NETSCALER_HTTP_RSP_LEN", "The total size of HTTP response" -146, IPFIX_FT_NETSCALER_SERVER_TTFB, 8, IPFIX_CODING_UINT, "NETSCALER_SERVER_TTFB", "Time elapsed in microseconds between receiving of request from the client and receiving the first byte of response from server" -147, IPFIX_FT_NETSCALER_SERVER_TTLB, 8, IPFIX_CODING_UINT, "NETSCALER_SERVER_TTLB", "Time elapsed in microseconds between receiving of request from the client and receiving the last byte of response from server" -150, IPFIX_FT_NETSCALER_APP_NAME_INCARNATION_NUMBER, 4, IPFIX_CODING_UINT, "NETSCALER_APP_NAME_INCARNATION_NUMBER", "Each named entity in the NetScaler is associated with an id. The name to id mapping is sent in the appname mapping template to the collector. Other records only contain the id of the entity and the corresponding name is stored by the collector and used when required. If a new entity gets added or an entity gets removed or modified, the incarnation number changes which indicates the collector to use the appname mapping record to update its database" -151, IPFIX_FT_NETSCALER_APP_NAME_APP_ID, 4, IPFIX_CODING_UINT, "NETSCALER_APP_NAME_APP_ID", "The id of a named entity" -152, IPFIX_FT_NETSCALER_APP_NAME, 65535, IPFIX_CODING_STRING, "NETSCALER_APP_NAME", "Name of the entity configured on Netscaler for which the name-to-id mapping is being sent in the current record" -153, IPFIX_FT_NETSCALER_HTTP_REQ_RCV_FB, 8, IPFIX_CODING_HEX, "NETSCALER_HTTP_REQ_RCV_FB", "Timestamp when the first byte of request was received from client at the NetScaler. Uses an NTP format of date/time" -156, IPFIX_FT_NETSCALER_HTTP_REQ_FORW_FB, 8, IPFIX_CODING_HEX, "NETSCALER_HTTP_REQ_FORW_FB", "Timestamp when the first byte of request was forwarded to server from the NetScaler. Uses an NTP format of date/time" -157, IPFIX_FT_NETSCALER_HTTP_RES_RCV_FB, 8, IPFIX_CODING_HEX, "NETSCALER_HTTP_RES_RCV_FB", "Timestamp when the first byte of response was received from server at the NetScaler. 
Uses an NTP format of date/time" -158, IPFIX_FT_NETSCALER_HTTP_RES_FORW_FB, 8, IPFIX_CODING_HEX, "NETSCALER_HTTP_RES_FORW_FB", "Timestamp when the first byte of response was forwarded to client from the NetScaler. Uses an NTP format of date/time" -159, IPFIX_FT_NETSCALER_HTTP_REQ_RCV_LB, 8, IPFIX_CODING_HEX, "NETSCALER_HTTP_REQ_RCV_LB", "Timestamp when the last byte of request was received from client at the NetScaler. Uses an NTP format of date/time" -160, IPFIX_FT_NETSCALER_HTTP_REQ_FORW_LB, 8, IPFIX_CODING_HEX, "NETSCALER_HTTP_REQ_FORW_LB", "Uses an NTP format of date/time" -161, IPFIX_FT_NETSCALER_MAIN_PAGE_ID, 4, IPFIX_CODING_UINT, "NETSCALER_MAIN_PAGE_ID", "In a html page, the main page transaction is associated with all its embedded object transactions. Each such embedded object transaction record contains the transaction Id of the main page so that a parent link to the main transaction can be created. This is used in generating a waterfall chart model depicting the various timing information of the entire page loading." -162, IPFIX_FT_NETSCALER_MAIN_PAGE_COREID, 4, IPFIX_CODING_UINT, "NETSCALER_MAIN_PAGE_COREID", "The above transaction ID is unique within the process. Hence the exporting process ID of the main page is also required." -163, IPFIX_FT_NETSCALER_HTTP_CLIENT_INTERACTION_START_TIME, 65535, IPFIX_CODING_STRING, "NETSCALER_HTTP_CLIENT_INTERACTION_START_TIME", "The timestamp when the page starts loading" -164, IPFIX_FT_NETSCALER_HTTP_CLIENT_RENDER_END_TIME, 65535, IPFIX_CODING_STRING, "NETSCALER_HTTP_CLIENT_RENDER_END_TIME", "The timestamp when the page completely renders" -165, IPFIX_FT_NETSCALER_HTTP_CLIENT_RENDER_START_TIME, 65535, IPFIX_CODING_STRING, "NETSCALER_HTTP_CLIENT_RENDER_START_TIME", "The timestamp when page rendering begins" -167, IPFIX_FT_NETSCALER_APP_TEMPLATE_NAME, 65535, IPFIX_CODING_STRING, "NETSCALER_APP_TEMPLATE_NAME", "Name of the template to which the current entity belongs (see netscalerAppTemplateID)" -168, IPFIX_FT_NETSCALER_HTTP_CLIENT_INTERACTION_END_TIME, 65535, IPFIX_CODING_STRING, "NETSCALER_HTTP_CLIENT_INTERACTION_END_TIME", "The NTP timestamp when the HTML page becomes interactive to the user" -169, IPFIX_FT_NETSCALER_HTTP_RES_RCV_LB, 8, IPFIX_CODING_HEX, "NETSCALER_HTTP_RES_RCV_LB", "Uses an NTP format of date/time" -170, IPFIX_FT_NETSCALER_HTTP_RES_FORW_LB, 8, IPFIX_CODING_HEX, "NETSCALER_HTTP_RES_FORW_LB", "Uses an NTP format of date/time" -171, IPFIX_FT_NETSCALER_APP_UNIT_NAME_APP_ID, 4, IPFIX_CODING_UINT, "NETSCALER_APP_UNIT_NAME_APP_ID", "Netscaler uses application templates that groups a set of entities which can be exported and imported when needed. This Information Element exportes the ID of the application template to which the entity belongs." 
-172, IPFIX_FT_NETSCALER_DB_LOGIN_FLAGS, 4, IPFIX_CODING_UINT, "NETSCALER_DB_LOGIN_FLAGS", "SQL login flags" -173, IPFIX_FT_NETSCALER_DB_REQ_TYPE, 1, IPFIX_CODING_UINT, "NETSCALER_DB_REQ_TYPE", "The type of database request" -174, IPFIX_FT_NETSCALER_DB_PROTOCOL_NAME, 1, IPFIX_CODING_UINT, "NETSCALER_DB_PROTOCOL_NAME", "The database protocol being used" -175, IPFIX_FT_NETSCALER_DB_USER_NAME, 65535, IPFIX_CODING_STRING, "NETSCALER_DB_USER_NAME", "Database username" -176, IPFIX_FT_NETSCALER_DB_DATABASE_NAME, 65535, IPFIX_CODING_STRING, "NETSCALER_DB_DATABASE_NAME", "DB database name" -177, IPFIX_FT_NETSCALER_DB_CLIENT_HOST_NAME, 65535, IPFIX_CODING_STRING, "NETSCALER_DB_CLIENT_HOST_NAME", "DB client host name" -178, IPFIX_FT_NETSCALER_DB_REQ_STRING, 65535, IPFIX_CODING_STRING, "NETSCALER_DB_REQ_STRING", "DB request string" -179, IPFIX_FT_NETSCALER_DB_RESP_STATUS_STRING, 65535, IPFIX_CODING_STRING, "NETSCALER_DB_RESP_STATUS_STRING", "Status of the response as indicated in the Database response" -180, IPFIX_FT_NETSCALER_DB_RESP_STATUS, 8, IPFIX_CODING_UINT, "NETSCALER_DB_RESP_STATUS", "SQL response status" -181, IPFIX_FT_NETSCALER_DB_RESP_LENGTH, 8, IPFIX_CODING_UINT, "NETSCALER_DB_RESP_LENGTH", "SQL response length" -182, IPFIX_FT_NETSCALER_CLIENT_RTT, 4, IPFIX_CODING_UINT, "NETSCALER_CLIENT_RTT", "The RTT of the client is exported in the server side records" -183, IPFIX_FT_NETSCALER_HTTP_CONTENT_TYPE, 65535, IPFIX_CODING_STRING, "NETSCALER_HTTP_CONTENT_TYPE", "The Content Type string as seen in the HTTP header" -185, IPFIX_FT_NETSCALER_HTTP_REQ_AUTHORIZATION, 65535, IPFIX_CODING_STRING, "NETSCALER_HTTP_REQ_AUTHORIZATION", "Value of the Authorization HTTP header" -186, IPFIX_FT_NETSCALER_HTTP_REQ_VIA, 65535, IPFIX_CODING_STRING, "NETSCALER_HTTP_REQ_VIA", "Value of the Via HTTP header" -187, IPFIX_FT_NETSCALER_HTTP_RES_LOCATION, 65535, IPFIX_CODING_STRING, "NETSCALER_HTTP_RES_LOCATION", "Value of the Location HTTP response header" -188, IPFIX_FT_NETSCALER_HTTP_RES_SET_COOKIE, 65535, IPFIX_CODING_STRING, "NETSCALER_HTTP_RES_SET_COOKIE", "value of the Set-Cookie HTTP response header" -189, IPFIX_FT_NETSCALER_HTTP_RES_SET_COOKIE2, 65535, IPFIX_CODING_STRING, "NETSCALER_HTTP_RES_SET_COOKIE2", "value of the Set-Cookie2 HTTP response header" -190, IPFIX_FT_NETSCALER_HTTP_REQ_X_FORWARDED_FOR, 65535, IPFIX_CODING_STRING, "NETSCALER_HTTP_REQ_X_FORWARDED_FOR", "value of the X-Forwarded-For HTTP header" -192, IPFIX_FT_NETSCALER_CONNECTION_CHAIN_ID, 65535, IPFIX_CODING_BYTES, "NETSCALER_CONNECTION_CHAIN_ID", "This is a 16-byte ID that ties together all the TCP connections of ICA protocol, from client to the server that are terminated and established on layer-4 devices in the path. Since all these TCP connections belong to one logical connection from client to server, they will have the same connection chain ID." -193, IPFIX_FT_NETSCALER_CONNECTION_CHAIN_HOP_COUNT, 1, IPFIX_CODING_UINT, "NETSCALER_CONNECTION_CHAIN_HOP_COUNT", "The hop count of the current device in the connection chain from client to server (see connection chain id for more details)" -200, IPFIX_FT_NETSCALER_ICA_SESSION_GUID, 65535, IPFIX_CODING_BYTES, "NETSCALER_ICA_SESSION_GUID", "This is a 16-byte ID that identifies an ICA session. This value is present in the ICA protocol header. With Excalibur actual session GUID will be present, which indicates a unique session established by a user. Pre-Excalibur, this is a random Value generated by Netscaler." 
-201, IPFIX_FT_NETSCALE_ICA_CLIENT_VERSION, 65535, IPFIX_CODING_STRING, "NETSCALE_ICA_CLIENT_VERSION", "Version of the ICA client" -202, IPFIX_FT_NETSCALER_ICA_CLIENT_TYPE, 2, IPFIX_CODING_UINT, "NETSCALER_ICA_CLIENT_TYPE", "Identifies the type of ICA client" -203, IPFIX_FT_NETSCALER_ICA_CLIENT_IP, 4, IPFIX_CODING_IP, "NETSCALER_ICA_CLIENT_IP", "The ICA client IP as sent by the Citrix Receiver" -204, IPFIX_FT_NETSCALER_ICA_CLIENT_HOSTNAME, 65535, IPFIX_CODING_STRING, "NETSCALER_ICA_CLIENT_HOSTNAME", "name of the ICA client host" -205, IPFIX_FT_NETSCALER_AAA_USERNAME, 65535, IPFIX_CODING_STRING, "NETSCALER_AAA_USERNAME", "If the connection is over VPN, the AAA username for the session" -207, IPFIX_FT_NETSCALER_ICA_DOMAIN_NAME, 65535, IPFIX_CODING_STRING, "NETSCALER_ICA_DOMAIN_NAME", "Domain of the ICA client" -208, IPFIX_FT_NETSCALER_ICA_CLIENT_LAUNCHER, 2, IPFIX_CODING_UINT, "NETSCALER_ICA_CLIENT_LAUNCHER", "Identifies the ICA launcher" -209, IPFIX_FT_NETSCALER_ICA_SESSION_SETUP_TIME, 4, IPFIX_CODING_UINT, "NETSCALER_ICA_SESSION_SETUP_TIME", "Number of seconds since Unix epoch (usually...?)" -210, IPFIX_FT_NETSCALER_ICA_SERVER_NAME, 65535, IPFIX_CODING_STRING, "NETSCALER_ICA_SERVER_NAME", "name of the ICA server" -214, IPFIX_FT_NETSCALER_ICA_SESSION_RECONNECTS, 1, IPFIX_CODING_UINT, "NETSCALER_ICA_SESSION_RECONNECTS", "Number of times session reconnects happened" -215, IPFIX_FT_NETSCALER_ICA_RTT, 4, IPFIX_CODING_UINT, "NETSCALER_ICA_RTT", "The ICA client sends a probe packet to the server, which sends back a response. Using this, the ICA process calculates the round trip time between the client and server which is exported to the appflow collector as ICA RTT." -216, IPFIX_FT_NETSCALER_ICA_CLIENT_SIDE_RX_BYTES, 4, IPFIX_CODING_UINT, "NETSCALER_ICA_CLIENT_SIDE_RX_BYTES", "Number of bytes received on client ICA connection" -217, IPFIX_FT_NETSCALER_ICA_CLIENT_SIDE_TX_BYTES, 4, IPFIX_CODING_UINT, "NETSCALER_ICA_CLIENT_SIDE_TX_BYTES", "Number of bytes transmitted on client ICA connection" -219, IPFIX_FT_NETSCALER_ICA_CLIENT_SIDE_PACKETS_RETRANSMIT, 2, IPFIX_CODING_UINT, "NETSCALER_ICA_CLIENT_SIDE_PACKETS_RETRANSMIT", "Number of packets retransmitted on clientside connection" -220, IPFIX_FT_NETSCALER_ICA_SERVER_SIDE_PACKETS_RETRANSMIT, 2, IPFIX_CODING_UINT, "NETSCALER_ICA_SERVER_SIDE_PACKETS_RETRANSMIT", "Number of packets retransmitted on serverside connection" -221, IPFIX_FT_NETSCALER_ICA_CLIENT_SIDE_RTT, 4, IPFIX_CODING_UINT, "NETSCALER_ICA_CLIENT_SIDE_RTT", "The TCP rtt on the client ICA connection" -222, IPFIX_FT_NETSCALER_ICA_SERVER_SIDE_RTT, 4, IPFIX_CODING_UINT, "NETSCALER_ICA_SERVER_SIDE_RTT", "The TCP rtt on the server ICA connection" -223, IPFIX_FT_NETSCALER_ICA_SESSION_UPDATE_BEGIN_SEC, 4, IPFIX_CODING_UINT, "NETSCALER_ICA_SESSION_UPDATE_BEGIN_SEC", "Absolute timestamp of end of ICA session update Number of seconds since Unix epoch (usually...?)" -224, IPFIX_FT_NETSCALER_ICA_SESSION_UPDATE_END_SEC, 4, IPFIX_CODING_UINT, "NETSCALER_ICA_SESSION_UPDATE_END_SEC", "Absolute timestamp of beginning of ICA session update Number of seconds since Unix epoch (usually...?)" -225, IPFIX_FT_NETSCALER_ICA_CHANNEL_ID_1, 4, IPFIX_CODING_UINT, "NETSCALER_ICA_CHANNEL_ID_1", "The IDs of the ICA channels opened" -226, IPFIX_FT_NETSCALER_ICA_CHANNEL_ID_1_BYTES, 4, IPFIX_CODING_UINT, "NETSCALER_ICA_CHANNEL_ID_1_BYTES", "The IDs of the ICA channels opened" -227, IPFIX_FT_NETSCALER_ICA_CHANNEL_ID_2, 4, IPFIX_CODING_UINT, "NETSCALER_ICA_CHANNEL_ID_2", "The IDs of the ICA channels opened" -228, 
IPFIX_FT_NETSCALER_ICA_CHANNEL_ID_2_BYTES, 4, IPFIX_CODING_UINT, "NETSCALER_ICA_CHANNEL_ID_2_BYTES", "The IDs of the ICA channels opened" -229, IPFIX_FT_NETSCALER_ICA_CHANNEL_ID_3, 4, IPFIX_CODING_UINT, "NETSCALER_ICA_CHANNEL_ID_3", "The IDs of the ICA channels opened" -230, IPFIX_FT_NETSCALER_ICA_CHANNEL_ID_3_BYTES, 4, IPFIX_CODING_UINT, "NETSCALER_ICA_CHANNEL_ID_3_BYTES", "The IDs of the ICA channels opened" -231, IPFIX_FT_NETSCALER_ICA_CHANNEL_ID_4, 4, IPFIX_CODING_UINT, "NETSCALER_ICA_CHANNEL_ID_4", "The IDs of the ICA channels opened" -232, IPFIX_FT_NETSCALER_ICA_CHANNEL_ID_4_BYTES, 4, IPFIX_CODING_UINT, "NETSCALER_ICA_CHANNEL_ID_4_BYTES", "The IDs of the ICA channels opened" -233, IPFIX_FT_NETSCALER_ICA_CHANNEL_ID_5, 4, IPFIX_CODING_UINT, "NETSCALER_ICA_CHANNEL_ID_5", "The IDs of the ICA channels opened" -234, IPFIX_FT_NETSCALER_ICA_CHANNEL_ID_5_BYTES, 4, IPFIX_CODING_UINT, "NETSCALER_ICA_CHANNEL_ID_5_BYTES", "The IDs of the ICA channels opened" -235, IPFIX_FT_NETSCALER_ICA_CONNECTION_PRIORITY, 2, IPFIX_CODING_UINT, "NETSCALER_ICA_CONNECTION_PRIORITY", "Identifies the priority of ICA connection" -236, IPFIX_FT_NETSCALER_APPLICATION_STARTUP_DURATION, 4, IPFIX_CODING_UINT, "NETSCALER_APPLICATION_STARTUP_DURATION", "The time elapsed between the launch of an application and when it started running" -237, IPFIX_FT_NETSCALER_ICA_LAUNCH_MECHANISM, 2, IPFIX_CODING_UINT, "NETSCALER_ICA_LAUNCH_MECHANISM", "The mechanism used to launch ICA applicaiton" -238, IPFIX_FT_NETSCALER_ICA_APPLICATION_NAME, 65535, IPFIX_CODING_STRING, "NETSCALER_ICA_APPLICATION_NAME", "ICA application name" -239, IPFIX_FT_NETSCALER_APPLICATION_STARTUP_TIME, 4, IPFIX_CODING_UINT, "NETSCALER_APPLICATION_STARTUP_TIME", "The time when an application started on the server Number of seconds since Unix epoch (usually...?)" -240, IPFIX_FT_NETSCALER_ICA_APPLICATION_TERMINATION_TYPE, 2, IPFIX_CODING_UINT, "NETSCALER_ICA_APPLICATION_TERMINATION_TYPE", "Indicates how the application termination happened, eg: User closed the app, session termination, abort etc" -241, IPFIX_FT_NETSCALER_ICA_APPLICATION_TERMINATION_TIME, 4, IPFIX_CODING_UINT, "NETSCALER_ICA_APPLICATION_TERMINATION_TIME", "The time when the application was terminated Number of seconds since Unix epoch (usually...?)" -242, IPFIX_FT_NETSCALER_ICA_SESSION_END_TIME, 4, IPFIX_CODING_UINT, "NETSCALER_ICA_SESSION_END_TIME", "The time when the ICA session ended Number of seconds since Unix epoch (usually...?)" -243, IPFIX_FT_NETSCALER_ICA_CLIENT_SIDE_JITTER, 4, IPFIX_CODING_UINT, "NETSCALER_ICA_CLIENT_SIDE_JITTER", "The variance of client side RTT w.r.t the calculated RTT" -244, IPFIX_FT_NETSCALER_ICA_SERVER_SIDE_JITTER, 4, IPFIX_CODING_UINT, "NETSCALER_ICA_SERVER_SIDE_JITTER", "The variance of server side RTT w.r.t the calculated RTT" -245, IPFIX_FT_NETSCALER_ICA_APP_PROCESS_ID, 4, IPFIX_CODING_UINT, "NETSCALER_ICA_APP_PROCESS_ID", "The process ID of the application launched on the server" -246, IPFIX_FT_NETSCALER_ICA_APP_MODULE_PATH, 65535, IPFIX_CODING_STRING, "NETSCALER_ICA_APP_MODULE_PATH", "path of the ICA application being launched" -247, IPFIX_FT_NETSCALER_ICA_DEVICE_SERIAL_NO, 4, IPFIX_CODING_UINT, "NETSCALER_ICA_DEVICE_SERIAL_NO", "Used in conjunction with clientcookie to identify primary connection and tie up streams of a MSI connection" -248, IPFIX_FT_NETSCALER_MSI_CLIENT_COOKIE, 65535, IPFIX_CODING_BYTES, "NETSCALER_MSI_CLIENT_COOKIE", "An identifier that helps to tie up multiple connections of the same session when Multi stream ICA is used. 
Should be same across all MSI connections" -249, IPFIX_FT_NETSCALER_ICA_FLAGS, 8, IPFIX_CODING_UINT, "NETSCALER_ICA_FLAGS", "ICA specific flags: 0x0004 - MSI Enabled on Session; 0x0008 - Secondary Connection; 0x0010 - Seamless session; 0x0020 - Compression disabled explicitly; 0x0040 - Global ICA GUID enabled; 0x0080 - EUEM channel supported" -250, IPFIX_FT_NETSCALER_ICA_USERNAME, 65535, IPFIX_CODING_STRING, "NETSCALER_ICA_USERNAME", "Username for the ICA session" -251, IPFIX_FT_NETSCALER_LICENSE_TYPE, 1, IPFIX_CODING_UINT, "NETSCALER_LICENSE_TYPE", "" -252, IPFIX_FT_NETSCALER_MAX_LICENSE_COUNT, 8, IPFIX_CODING_UINT, "NETSCALER_MAX_LICENSE_COUNT", "" -253, IPFIX_FT_NETSCALER_CURRENT_LICENSE_CONSUMED, 8, IPFIX_CODING_UINT, "NETSCALER_CURRENT_LICENSE_CONSUMED", "" -254, IPFIX_FT_NETSCALER_ICA_NETWORK_UPDATE_START_TIME, 4, IPFIX_CODING_UINT, "NETSCALER_ICA_NETWORK_UPDATE_START_TIME", "A network update record is sent at a defined interval that contains the ICA connection statistics for that interval. This Information Element contains the timestamp when the collection stats in this record began Number of seconds since Unix epoch (usually...?)" -255, IPFIX_FT_NETSCALER_ICA_NETWORK_UPDATE_END_TIME, 4, IPFIX_CODING_UINT, "NETSCALER_ICA_NETWORK_UPDATE_END_TIME", "A network update record is sent at a defined interval that contains the ICA connection statistics for that interval.
This Information Element contains the timestamp when the collection stats in this record ended Number of seconds since Unix epoch (usually...?)" -256, IPFIX_FT_NETSCALER_ICA_CLIENT_SIDE_SRTT, 4, IPFIX_CODING_UINT, "NETSCALER_ICA_CLIENT_SIDE_SRTT", "RTT smoothed over the client side connection by considering one eighth of current RTT and seven eighth of the smoothed RTT since the beginning of the connection" -257, IPFIX_FT_NETSCALER_ICA_SERVER_SIDE_SRTT, 4, IPFIX_CODING_UINT, "NETSCALER_ICA_SERVER_SIDE_SRTT", "RTT smoothed over the server side connection by considering one eighth of current RTT and seven eighth of the smoothed RTT since the beginning of the connection" -258, IPFIX_FT_NETSCALER_ICA_CLIENT_SIDE_DELAY, 4, IPFIX_CODING_UINT, "NETSCALER_ICA_CLIENT_SIDE_DELAY", "Indicates time taken by Netscaler to process this client side packet (NS introduced processing delay)" -259, IPFIX_FT_NETSCALER_ICA_SERVER_SIDE_DELAY, 4, IPFIX_CODING_UINT, "NETSCALER_ICA_SERVER_SIDE_DELAY", "Indicates time taken by Netscaler to process this server side packet (NS introduced processing delay)" -260, IPFIX_FT_NETSCALER_ICA_HOST_DELAY, 4, IPFIX_CODING_UINT, "NETSCALER_ICA_HOST_DELAY", "Indicates a portion of the ICA RTT measurement - time delay introduced at the Host while processing the packet" -261, IPFIX_FT_NETSCALER_ICA_CLIENTSIDE_WINDOW_SIZE, 2, IPFIX_CODING_UINT, "NETSCALER_ICA_CLIENTSIDE_WINDOW_SIZE", "TCP window size on the client connection" -262, IPFIX_FT_NETSCALER_ICA_SERVER_SIDE_WINDOW_SIZE, 2, IPFIX_CODING_UINT, "NETSCALER_ICA_SERVER_SIDE_WINDOW_SIZE", "TCP window size on the server connection" -263, IPFIX_FT_NETSCALER_ICA_CLIENT_SIDE_RTO_COUNT, 2, IPFIX_CODING_UINT, "NETSCALER_ICA_CLIENT_SIDE_RTO_COUNT", "Number of times retransmission timeout occurred on client connection" -264, IPFIX_FT_NETSCALER_ICA_SERVER_SIDE_RTO_COUNT, 2, IPFIX_CODING_UINT, "NETSCALER_ICA_SERVER_SIDE_RTO_COUNT", "Number of times retransmission timeout occurred on server connection" -265, IPFIX_FT_NETSCALER_ICA_L7_CLIENT_LATENCY, 4, IPFIX_CODING_UINT, "NETSCALER_ICA_L7_CLIENT_LATENCY", "L7 layer latency measured using ICA probes and responses sent between Receiver and the Host, on client side pcb." -266, IPFIX_FT_NETSCALER_ICA_L7_SERVER_LATENCY, 4, IPFIX_CODING_UINT, "NETSCALER_ICA_L7_SERVER_LATENCY", "L7 layer latency measured using ICA probes and responses sent between Receiver and the Host, on server side pcb." -267, IPFIX_FT_NETSCALER_HTTP_DOMAIN_NAME, 65535, IPFIX_CODING_STRING, "NETSCALER_HTTP_DOMAIN_NAME", "HTTP domain name" -268, IPFIX_FT_NETSCALER_CACHE_REDIR_CLIENT_CONNECTION_CORE_ID, 4, IPFIX_CODING_UINT, "NETSCALER_CACHE_REDIR_CLIENT_CONNECTION_CORE_ID", "The client connection id is unique within a process. Hence the process id of the client connection is also passed to make the complete set unique." -269, IPFIX_FT_NETSCALER_CACHE_REDIR_CLIENT_CONNECTION_TRANSACTION_ID, 4, IPFIX_CODING_UINT, "NETSCALER_CACHE_REDIR_CLIENT_CONNECTION_TRANSACTION_ID", "When a request hits CR vserver and is redirected to the cache server and a cache miss happens, the cache sends a the request to the origin server. This request mostly comes back to the NS. This ID is used to link the cache request with the actual client request on the collector." 
+128, IPFIX_FT_NETSCALER_ROUND_TRIP_TIME, 4, IPFIX_CODING_UINT, "netscaler_round_trip_time", "The TCP RTT of the flow in milliseconds since the time last record was sent" +129, IPFIX_FT_NETSCALER_TRANSACTION_ID, 4, IPFIX_CODING_UINT, "netscaler_transaction_id", "At Layer-7, the four flows of a transaction between client and server (client-to-NS, NS-to-Server, Server-to-NS, NS-to-Client) are tied together using the transaction ID." +130, IPFIX_FT_NETSCALER_HTTP_REQ_URL, 65535, IPFIX_CODING_STRING, "netscaler_http_req_url", "HTTP request URL" +131, IPFIX_FT_NETSCALER_HTTP_REQ_COOKIE, 65535, IPFIX_CODING_STRING, "netscaler_http_req_cookie", "Value of Cookie header present in HTTP request" +132, IPFIX_FT_NETSCALER_FLOW_FLAGS, 8, IPFIX_CODING_UINT, "netscaler_flow_flags", "application layer flags, for use between the exporter and collector to indicate various Layer-7 events and types like the direction of the flow (client-in, svc-out, etc.), http version, NetScaler cache served responses, SSL, compression, TCP buffering, and many more." +133, IPFIX_FT_NETSCALER_CONNECTION_ID, 4, IPFIX_CODING_UINT, "netscaler_connection_id", "The two flows of a TCP connection are tied together with a connection ID" +134, IPFIX_FT_NETSCALER_SYSLOG_PRIORITY, 1, IPFIX_CODING_UINT, "netscaler_syslog_priority", "Priority of the syslog message being logged" +135, IPFIX_FT_NETSCALER_SYSLOG_MESSAGE, 65535, IPFIX_CODING_STRING, "netscaler_syslog_message", "The syslog message generated on Netscaler" +136, IPFIX_FT_NETSCALER_SYSLOG_TIMESTAMP, 8, IPFIX_CODING_UINT, "netscaler_syslog_timestamp", "Timestamp when the syslog (contained in the syslog record) was generated Number of milliseconds since Unix epoch" +140, IPFIX_FT_NETSCALER_HTTP_REQ_REFERER, 65535, IPFIX_CODING_STRING, "netscaler_http_req_referer", "Value of Referer header present in HTTP request" +141, IPFIX_FT_NETSCALER_HTTP_REQ_METHOD, 65535, IPFIX_CODING_STRING, "netscaler_http_req_method", "The request method in HTTP request" +142, IPFIX_FT_NETSCALER_HTTP_REQ_HOST, 65535, IPFIX_CODING_STRING, "netscaler_http_req_host", "Value of Host header in HTTP request" +143, IPFIX_FT_NETSCALER_HTTP_REQ_USER_AGENT, 65535, IPFIX_CODING_STRING, "netscaler_http_req_user_agent", "The User Agent string as seen in HTTP request header" +144, IPFIX_FT_NETSCALER_HTTP_RSP_STATUS, 2, IPFIX_CODING_UINT, "netscaler_http_rsp_status", "Status of HTTP response" +145, IPFIX_FT_NETSCALER_HTTP_RSP_LEN, 8, IPFIX_CODING_UINT, "netscaler_http_rsp_len", "The total size of HTTP response" +146, IPFIX_FT_NETSCALER_SERVER_TTFB, 8, IPFIX_CODING_UINT, "netscaler_server_ttfb", "Time elapsed in microseconds between receiving of request from the client and receiving the first byte of response from server" +147, IPFIX_FT_NETSCALER_SERVER_TTLB, 8, IPFIX_CODING_UINT, "netscaler_server_ttlb", "Time elapsed in microseconds between receiving of request from the client and receiving the last byte of response from server" +150, IPFIX_FT_NETSCALER_APP_NAME_INCARNATION_NUMBER, 4, IPFIX_CODING_UINT, "netscaler_app_name_incarnation_number", "Each named entity in the NetScaler is associated with an id. The name to id mapping is sent in the appname mapping template to the collector. Other records only contain the id of the entity and the corresponding name is stored by the collector and used when required. 
If a new entity gets added or an entity gets removed or modified, the incarnation number changes, which indicates to the collector that it should use the appname mapping record to update its database" +151, IPFIX_FT_NETSCALER_APP_NAME_APP_ID, 4, IPFIX_CODING_UINT, "netscaler_app_name_app_id", "The id of a named entity" +152, IPFIX_FT_NETSCALER_APP_NAME, 65535, IPFIX_CODING_STRING, "netscaler_app_name", "Name of the entity configured on Netscaler for which the name-to-id mapping is being sent in the current record" +153, IPFIX_FT_NETSCALER_HTTP_REQ_RCV_FB, 8, IPFIX_CODING_HEX, "netscaler_http_req_rcv_fb", "Timestamp when the first byte of request was received from client at the NetScaler. Uses an NTP format of date/time" +156, IPFIX_FT_NETSCALER_HTTP_REQ_FORW_FB, 8, IPFIX_CODING_HEX, "netscaler_http_req_forw_fb", "Timestamp when the first byte of request was forwarded to server from the NetScaler. Uses an NTP format of date/time" +157, IPFIX_FT_NETSCALER_HTTP_RES_RCV_FB, 8, IPFIX_CODING_HEX, "netscaler_http_res_rcv_fb", "Timestamp when the first byte of response was received from server at the NetScaler. Uses an NTP format of date/time" +158, IPFIX_FT_NETSCALER_HTTP_RES_FORW_FB, 8, IPFIX_CODING_HEX, "netscaler_http_res_forw_fb", "Timestamp when the first byte of response was forwarded to client from the NetScaler. Uses an NTP format of date/time" +159, IPFIX_FT_NETSCALER_HTTP_REQ_RCV_LB, 8, IPFIX_CODING_HEX, "netscaler_http_req_rcv_lb", "Timestamp when the last byte of request was received from client at the NetScaler. Uses an NTP format of date/time" +160, IPFIX_FT_NETSCALER_HTTP_REQ_FORW_LB, 8, IPFIX_CODING_HEX, "netscaler_http_req_forw_lb", "Uses an NTP format of date/time" +161, IPFIX_FT_NETSCALER_MAIN_PAGE_ID, 4, IPFIX_CODING_UINT, "netscaler_main_page_id", "In an HTML page, the main page transaction is associated with all its embedded object transactions. Each such embedded object transaction record contains the transaction ID of the main page so that a parent link to the main transaction can be created. This is used in generating a waterfall chart model depicting the various timing information of the entire page loading." +162, IPFIX_FT_NETSCALER_MAIN_PAGE_COREID, 4, IPFIX_CODING_UINT, "netscaler_main_page_coreid", "The above transaction ID is unique within the process. Hence the exporting process ID of the main page is also required."
+163, IPFIX_FT_NETSCALER_HTTP_CLIENT_INTERACTION_START_TIME, 65535, IPFIX_CODING_STRING, "netscaler_http_client_interaction_start_time", "The timestamp when the page starts loading" +164, IPFIX_FT_NETSCALER_HTTP_CLIENT_RENDER_END_TIME, 65535, IPFIX_CODING_STRING, "netscaler_http_client_render_end_time", "The timestamp when the page completely renders" +165, IPFIX_FT_NETSCALER_HTTP_CLIENT_RENDER_START_TIME, 65535, IPFIX_CODING_STRING, "netscaler_http_client_render_start_time", "The timestamp when page rendering begins" +167, IPFIX_FT_NETSCALER_APP_TEMPLATE_NAME, 65535, IPFIX_CODING_STRING, "netscaler_app_template_name", "Name of the template to which the current entity belongs (see netscalerAppTemplateID)" +168, IPFIX_FT_NETSCALER_HTTP_CLIENT_INTERACTION_END_TIME, 65535, IPFIX_CODING_STRING, "netscaler_http_client_interaction_end_time", "The NTP timestamp when the HTML page becomes interactive to the user" +169, IPFIX_FT_NETSCALER_HTTP_RES_RCV_LB, 8, IPFIX_CODING_HEX, "netscaler_http_res_rcv_lb", "Uses an NTP format of date/time" +170, IPFIX_FT_NETSCALER_HTTP_RES_FORW_LB, 8, IPFIX_CODING_HEX, "netscaler_http_res_forw_lb", "Uses an NTP format of date/time" +171, IPFIX_FT_NETSCALER_APP_UNIT_NAME_APP_ID, 4, IPFIX_CODING_UINT, "netscaler_app_unit_name_app_id", "Netscaler uses application templates that group a set of entities which can be exported and imported when needed. This Information Element exports the ID of the application template to which the entity belongs." +172, IPFIX_FT_NETSCALER_DB_LOGIN_FLAGS, 4, IPFIX_CODING_UINT, "netscaler_db_login_flags", "SQL login flags" +173, IPFIX_FT_NETSCALER_DB_REQ_TYPE, 1, IPFIX_CODING_UINT, "netscaler_db_req_type", "The type of database request" +174, IPFIX_FT_NETSCALER_DB_PROTOCOL_NAME, 1, IPFIX_CODING_UINT, "netscaler_db_protocol_name", "The database protocol being used" +175, IPFIX_FT_NETSCALER_DB_USER_NAME, 65535, IPFIX_CODING_STRING, "netscaler_db_user_name", "Database username" +176, IPFIX_FT_NETSCALER_DB_DATABASE_NAME, 65535, IPFIX_CODING_STRING, "netscaler_db_database_name", "DB database name" +177, IPFIX_FT_NETSCALER_DB_CLIENT_HOST_NAME, 65535, IPFIX_CODING_STRING, "netscaler_db_client_host_name", "DB client host name" +178, IPFIX_FT_NETSCALER_DB_REQ_STRING, 65535, IPFIX_CODING_STRING, "netscaler_db_req_string", "DB request string" +179, IPFIX_FT_NETSCALER_DB_RESP_STATUS_STRING, 65535, IPFIX_CODING_STRING, "netscaler_db_resp_status_string", "Status of the response as indicated in the Database response" +180, IPFIX_FT_NETSCALER_DB_RESP_STATUS, 8, IPFIX_CODING_UINT, "netscaler_db_resp_status", "SQL response status" +181, IPFIX_FT_NETSCALER_DB_RESP_LENGTH, 8, IPFIX_CODING_UINT, "netscaler_db_resp_length", "SQL response length" +182, IPFIX_FT_NETSCALER_CLIENT_RTT, 4, IPFIX_CODING_UINT, "netscaler_client_rtt", "The RTT of the client is exported in the server side records" +183, IPFIX_FT_NETSCALER_HTTP_CONTENT_TYPE, 65535, IPFIX_CODING_STRING, "netscaler_http_content_type", "The Content Type string as seen in the HTTP header" +185, IPFIX_FT_NETSCALER_HTTP_REQ_AUTHORIZATION, 65535, IPFIX_CODING_STRING, "netscaler_http_req_authorization", "Value of the Authorization HTTP header" +186, IPFIX_FT_NETSCALER_HTTP_REQ_VIA, 65535, IPFIX_CODING_STRING, "netscaler_http_req_via", "Value of the Via HTTP header" +187, IPFIX_FT_NETSCALER_HTTP_RES_LOCATION, 65535, IPFIX_CODING_STRING, "netscaler_http_res_location", "Value of the Location HTTP response header" +188, IPFIX_FT_NETSCALER_HTTP_RES_SET_COOKIE, 65535, IPFIX_CODING_STRING,
"netscaler_http_res_set_cookie", "value of the Set-Cookie HTTP response header" +189, IPFIX_FT_NETSCALER_HTTP_RES_SET_COOKIE2, 65535, IPFIX_CODING_STRING, "netscaler_http_res_set_cookie2", "value of the Set-Cookie2 HTTP response header" +190, IPFIX_FT_NETSCALER_HTTP_REQ_X_FORWARDED_FOR, 65535, IPFIX_CODING_STRING, "netscaler_http_req_x_forwarded_for", "value of the X-Forwarded-For HTTP header" +192, IPFIX_FT_NETSCALER_CONNECTION_CHAIN_ID, 65535, IPFIX_CODING_BYTES, "netscaler_connection_chain_id", "This is a 16-byte ID that ties together all the TCP connections of ICA protocol, from client to the server that are terminated and established on layer-4 devices in the path. Since all these TCP connections belong to one logical connection from client to server, they will have the same connection chain ID." +193, IPFIX_FT_NETSCALER_CONNECTION_CHAIN_HOP_COUNT, 1, IPFIX_CODING_UINT, "netscaler_connection_chain_hop_count", "The hop count of the current device in the connection chain from client to server (see connection chain id for more details)" +200, IPFIX_FT_NETSCALER_ICA_SESSION_GUID, 65535, IPFIX_CODING_BYTES, "netscaler_ica_session_guid", "This is a 16-byte ID that identifies an ICA session. This value is present in the ICA protocol header. With Excalibur actual session GUID will be present, which indicates a unique session established by a user. Pre-Excalibur, this is a random Value generated by Netscaler." +201, IPFIX_FT_NETSCALE_ICA_CLIENT_VERSION, 65535, IPFIX_CODING_STRING, "NETSCALE_ICA_CLIENT_VERSION", "Version of the ICA client" +202, IPFIX_FT_NETSCALER_ICA_CLIENT_TYPE, 2, IPFIX_CODING_UINT, "netscaler_ica_client_type", "Identifies the type of ICA client" +203, IPFIX_FT_NETSCALER_ICA_CLIENT_IP, 4, IPFIX_CODING_IP, "netscaler_ica_client_ip", "The ICA client IP as sent by the Citrix Receiver" +204, IPFIX_FT_NETSCALER_ICA_CLIENT_HOSTNAME, 65535, IPFIX_CODING_STRING, "netscaler_ica_client_hostname", "name of the ICA client host" +205, IPFIX_FT_NETSCALER_AAA_USERNAME, 65535, IPFIX_CODING_STRING, "netscaler_aaa_username", "If the connection is over VPN, the AAA username for the session" +207, IPFIX_FT_NETSCALER_ICA_DOMAIN_NAME, 65535, IPFIX_CODING_STRING, "netscaler_ica_domain_name", "Domain of the ICA client" +208, IPFIX_FT_NETSCALER_ICA_CLIENT_LAUNCHER, 2, IPFIX_CODING_UINT, "netscaler_ica_client_launcher", "Identifies the ICA launcher" +209, IPFIX_FT_NETSCALER_ICA_SESSION_SETUP_TIME, 4, IPFIX_CODING_UINT, "netscaler_ica_session_setup_time", "Number of seconds since Unix epoch (usually...?)" +210, IPFIX_FT_NETSCALER_ICA_SERVER_NAME, 65535, IPFIX_CODING_STRING, "netscaler_ica_server_name", "name of the ICA server" +214, IPFIX_FT_NETSCALER_ICA_SESSION_RECONNECTS, 1, IPFIX_CODING_UINT, "netscaler_ica_session_reconnects", "Number of times session reconnects happened" +215, IPFIX_FT_NETSCALER_ICA_RTT, 4, IPFIX_CODING_UINT, "netscaler_ica_rtt", "The ICA client sends a probe packet to the server, which sends back a response. Using this, the ICA process calculates the round trip time between the client and server which is exported to the appflow collector as ICA RTT." 
+216, IPFIX_FT_NETSCALER_ICA_CLIENT_SIDE_RX_BYTES, 4, IPFIX_CODING_UINT, "netscaler_ica_client_side_rx_bytes", "Number of bytes received on client ICA connection" +217, IPFIX_FT_NETSCALER_ICA_CLIENT_SIDE_TX_BYTES, 4, IPFIX_CODING_UINT, "netscaler_ica_client_side_tx_bytes", "Number of bytes transmitted on client ICA connection" +219, IPFIX_FT_NETSCALER_ICA_CLIENT_SIDE_PACKETS_RETRANSMIT, 2, IPFIX_CODING_UINT, "netscaler_ica_client_side_packets_retransmit", "Number of packets retransmitted on clientside connection" +220, IPFIX_FT_NETSCALER_ICA_SERVER_SIDE_PACKETS_RETRANSMIT, 2, IPFIX_CODING_UINT, "netscaler_ica_server_side_packets_retransmit", "Number of packets retransmitted on serverside connection" +221, IPFIX_FT_NETSCALER_ICA_CLIENT_SIDE_RTT, 4, IPFIX_CODING_UINT, "netscaler_ica_client_side_rtt", "The TCP rtt on the client ICA connection" +222, IPFIX_FT_NETSCALER_ICA_SERVER_SIDE_RTT, 4, IPFIX_CODING_UINT, "netscaler_ica_server_side_rtt", "The TCP rtt on the server ICA connection" +223, IPFIX_FT_NETSCALER_ICA_SESSION_UPDATE_BEGIN_SEC, 4, IPFIX_CODING_UINT, "netscaler_ica_session_update_begin_sec", "Absolute timestamp of end of ICA session update Number of seconds since Unix epoch (usually...?)" +224, IPFIX_FT_NETSCALER_ICA_SESSION_UPDATE_END_SEC, 4, IPFIX_CODING_UINT, "netscaler_ica_session_update_end_sec", "Absolute timestamp of beginning of ICA session update Number of seconds since Unix epoch (usually...?)" +225, IPFIX_FT_NETSCALER_ICA_CHANNEL_ID_1, 4, IPFIX_CODING_UINT, "netscaler_ica_channel_id_1", "The IDs of the ICA channels opened" +226, IPFIX_FT_NETSCALER_ICA_CHANNEL_ID_1_BYTES, 4, IPFIX_CODING_UINT, "netscaler_ica_channel_id_1_bytes", "The IDs of the ICA channels opened" +227, IPFIX_FT_NETSCALER_ICA_CHANNEL_ID_2, 4, IPFIX_CODING_UINT, "netscaler_ica_channel_id_2", "The IDs of the ICA channels opened" +228, IPFIX_FT_NETSCALER_ICA_CHANNEL_ID_2_BYTES, 4, IPFIX_CODING_UINT, "netscaler_ica_channel_id_2_bytes", "The IDs of the ICA channels opened" +229, IPFIX_FT_NETSCALER_ICA_CHANNEL_ID_3, 4, IPFIX_CODING_UINT, "netscaler_ica_channel_id_3", "The IDs of the ICA channels opened" +230, IPFIX_FT_NETSCALER_ICA_CHANNEL_ID_3_BYTES, 4, IPFIX_CODING_UINT, "netscaler_ica_channel_id_3_bytes", "The IDs of the ICA channels opened" +231, IPFIX_FT_NETSCALER_ICA_CHANNEL_ID_4, 4, IPFIX_CODING_UINT, "netscaler_ica_channel_id_4", "The IDs of the ICA channels opened" +232, IPFIX_FT_NETSCALER_ICA_CHANNEL_ID_4_BYTES, 4, IPFIX_CODING_UINT, "netscaler_ica_channel_id_4_bytes", "The IDs of the ICA channels opened" +233, IPFIX_FT_NETSCALER_ICA_CHANNEL_ID_5, 4, IPFIX_CODING_UINT, "netscaler_ica_channel_id_5", "The IDs of the ICA channels opened" +234, IPFIX_FT_NETSCALER_ICA_CHANNEL_ID_5_BYTES, 4, IPFIX_CODING_UINT, "netscaler_ica_channel_id_5_bytes", "The IDs of the ICA channels opened" +235, IPFIX_FT_NETSCALER_ICA_CONNECTION_PRIORITY, 2, IPFIX_CODING_UINT, "netscaler_ica_connection_priority", "Identifies the priority of ICA connection" +236, IPFIX_FT_NETSCALER_APPLICATION_STARTUP_DURATION, 4, IPFIX_CODING_UINT, "netscaler_application_startup_duration", "The time elapsed between the launch of an application and when it started running" +237, IPFIX_FT_NETSCALER_ICA_LAUNCH_MECHANISM, 2, IPFIX_CODING_UINT, "netscaler_ica_launch_mechanism", "The mechanism used to launch ICA applicaiton" +238, IPFIX_FT_NETSCALER_ICA_APPLICATION_NAME, 65535, IPFIX_CODING_STRING, "netscaler_ica_application_name", "ICA application name" +239, IPFIX_FT_NETSCALER_APPLICATION_STARTUP_TIME, 4, IPFIX_CODING_UINT, 
"netscaler_application_startup_time", "The time when an application started on the server Number of seconds since Unix epoch (usually...?)" +240, IPFIX_FT_NETSCALER_ICA_APPLICATION_TERMINATION_TYPE, 2, IPFIX_CODING_UINT, "netscaler_ica_application_termination_type", "Indicates how the application termination happened, eg: User closed the app, session termination, abort etc" +241, IPFIX_FT_NETSCALER_ICA_APPLICATION_TERMINATION_TIME, 4, IPFIX_CODING_UINT, "netscaler_ica_application_termination_time", "The time when the application was terminated Number of seconds since Unix epoch (usually...?)" +242, IPFIX_FT_NETSCALER_ICA_SESSION_END_TIME, 4, IPFIX_CODING_UINT, "netscaler_ica_session_end_time", "The time when the ICA session ended Number of seconds since Unix epoch (usually...?)" +243, IPFIX_FT_NETSCALER_ICA_CLIENT_SIDE_JITTER, 4, IPFIX_CODING_UINT, "netscaler_ica_client_side_jitter", "The variance of client side RTT w.r.t the calculated RTT" +244, IPFIX_FT_NETSCALER_ICA_SERVER_SIDE_JITTER, 4, IPFIX_CODING_UINT, "netscaler_ica_server_side_jitter", "The variance of server side RTT w.r.t the calculated RTT" +245, IPFIX_FT_NETSCALER_ICA_APP_PROCESS_ID, 4, IPFIX_CODING_UINT, "netscaler_ica_app_process_id", "The process ID of the application launched on the server" +246, IPFIX_FT_NETSCALER_ICA_APP_MODULE_PATH, 65535, IPFIX_CODING_STRING, "netscaler_ica_app_module_path", "path of the ICA application being launched" +247, IPFIX_FT_NETSCALER_ICA_DEVICE_SERIAL_NO, 4, IPFIX_CODING_UINT, "netscaler_ica_device_serial_no", "Used in conjunction with clientcookie to identify primary connection and tie up streams of a MSI connection" +248, IPFIX_FT_NETSCALER_MSI_CLIENT_COOKIE, 65535, IPFIX_CODING_BYTES, "netscaler_msi_client_cookie", "An identifier that helps to tie up multiple connections of the same session when Multi stream ICA is used. Should be same across all MSI connections" +249, IPFIX_FT_NETSCALER_ICA_FLAGS, 8, IPFIX_CODING_UINT, "netscaler_ica_flags", "ICA specific flags: 0x0004 - MSI Enabled on Session; 0x0008 - Secondary Connection; 0x0010 - Seamless session; 0x0020 - Compression disabled explicitly; 0x0040 - Global ICA GUID enabled; 0x0080 - EUEM channel supported" +250, IPFIX_FT_NETSCALER_ICA_USERNAME, 65535, IPFIX_CODING_STRING, "netscaler_ica_username", "Username for the ICA session" +251, IPFIX_FT_NETSCALER_LICENSE_TYPE, 1, IPFIX_CODING_UINT, "netscaler_license_type", "" +252, IPFIX_FT_NETSCALER_MAX_LICENSE_COUNT, 8, IPFIX_CODING_UINT, "netscaler_max_license_count", "" +252, IPFIX_FT_NETSCALER_MAX_LICENSE_COUNT, 8, IPFIX_CODING_UINT, "netscaler_max_license_count", "" +253, IPFIX_FT_NETSCALER_CURRENT_LICENSE_CONSUMED, 8, IPFIX_CODING_UINT, "netscaler_current_license_consumed", "" +254, IPFIX_FT_NETSCALER_ICA_NETWORK_UPDATE_START_TIME, 4, IPFIX_CODING_UINT, "netscaler_ica_network_update_start_time", "A network update record is sent at a defined interval that contains the ICA connection statistics for that interval. This Information Element contains the timestamp when the collection stats in this record began Number of seconds since Unix epoch (usually...?)" +255, IPFIX_FT_NETSCALER_ICA_NETWORK_UPDATE_END_TIME, 4, IPFIX_CODING_UINT, "netscaler_ica_network_update_end_time", "A network update record is sent at a defined interval that contains the ICA connection statistics for that interval. 
This Information Element contains the timestamp when the collection stats in this record ended Number of seconds since Unix epoch (usually...?)" +256, IPFIX_FT_NETSCALER_ICA_CLIENT_SIDE_SRTT, 4, IPFIX_CODING_UINT, "netscaler_ica_client_side_srtt", "RTT smoothed over the client side connection by considering one eighth of current RTT and seven eighth of the smoothed RTT since the beginning of the connection" +257, IPFIX_FT_NETSCALER_ICA_SERVER_SIDE_SRTT, 4, IPFIX_CODING_UINT, "netscaler_ica_server_side_srtt", "RTT smoothed over the server side connection by considering one eighth of current RTT and seven eighth of the smoothed RTT since the beginning of the connection" +258, IPFIX_FT_NETSCALER_ICA_CLIENT_SIDE_DELAY, 4, IPFIX_CODING_UINT, "netscaler_ica_client_side_delay", "Indicates time taken by Netscaler to process this client side packet (NS introduced processing delay)" +259, IPFIX_FT_NETSCALER_ICA_SERVER_SIDE_DELAY, 4, IPFIX_CODING_UINT, "netscaler_ica_server_side_delay", "Indicates time taken by Netscaler to process this server side packet (NS introduced processing delay)" +260, IPFIX_FT_NETSCALER_ICA_HOST_DELAY, 4, IPFIX_CODING_UINT, "netscaler_ica_host_delay", "Indicates a portion of the ICA RTT measurement - time delay introduced at the Host while processing the packet" +261, IPFIX_FT_NETSCALER_ICA_CLIENTSIDE_WINDOW_SIZE, 2, IPFIX_CODING_UINT, "netscaler_ica_clientside_window_size", "TCP window size on the client connection" +262, IPFIX_FT_NETSCALER_ICA_SERVER_SIDE_WINDOW_SIZE, 2, IPFIX_CODING_UINT, "netscaler_ica_server_side_window_size", "TCP window size on the server connection" +263, IPFIX_FT_NETSCALER_ICA_CLIENT_SIDE_RTO_COUNT, 2, IPFIX_CODING_UINT, "netscaler_ica_client_side_rto_count", "Number of times retransmission timeout occurred on client connection" +264, IPFIX_FT_NETSCALER_ICA_SERVER_SIDE_RTO_COUNT, 2, IPFIX_CODING_UINT, "netscaler_ica_server_side_rto_count", "Number of times retransmission timeout occurred on server connection" +265, IPFIX_FT_NETSCALER_ICA_L7_CLIENT_LATENCY, 4, IPFIX_CODING_UINT, "netscaler_ica_l7_client_latency", "L7 layer latency measured using ICA probes and responses sent between Receiver and the Host, on client side pcb." +266, IPFIX_FT_NETSCALER_ICA_L7_SERVER_LATENCY, 4, IPFIX_CODING_UINT, "netscaler_ica_l7_server_latency", "L7 layer latency measured using ICA probes and responses sent between Receiver and the Host, on server side pcb." +267, IPFIX_FT_NETSCALER_HTTP_DOMAIN_NAME, 65535, IPFIX_CODING_STRING, "netscaler_http_domain_name", "HTTP domain name" +268, IPFIX_FT_NETSCALER_CACHE_REDIR_CLIENT_CONNECTION_CORE_ID, 4, IPFIX_CODING_UINT, "netscaler_cache_redir_client_connection_core_id", "The client connection id is unique within a process. Hence the process id of the client connection is also passed to make the complete set unique." +269, IPFIX_FT_NETSCALER_CACHE_REDIR_CLIENT_CONNECTION_TRANSACTION_ID, 4, IPFIX_CODING_UINT, "netscaler_cache_redir_client_connection_transaction_id", "When a request hits CR vserver and is redirected to the cache server and a cache miss happens, the cache sends a the request to the origin server. This request mostly comes back to the NS. This ID is used to link the cache request with the actual client request on the collector." 
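Each row above describes one NetScaler Information Element: the numeric field id, a C identifier, the length in octets (65535 presumably marking variable-length fields), the coding, a short column name, and a free-text description. The two awk generators added in the next diffs turn every such row into preprocessor defines plus one entry of an ipfix_field_type_t array. As a rough sketch (whitespace approximate, and using the ipfix_ft_netscaler array name that a later patch settles on), the row for IE 151 would come out of the generators roughly as:

/* from make-ipfix_def_netscaler_h.awk */
#define IPFIX_ENO_NETSCALER                 5951
#define IPFIX_FT_NETSCALER_APP_NAME_APP_ID  151
/* column-name define emitted in the END block after the _FT_ -> _CN_ substitution */
#define IPFIX_CN_NETSCALER_APP_NAME_APP_ID  "netscaler_app_name_app_id"

/* from make-ipfix_fields_netscaler_h.awk */
ipfix_field_type_t ipfix_ft_netscaler[] = {
    { IPFIX_ENO_NETSCALER, IPFIX_FT_NETSCALER_APP_NAME_APP_ID, 4, IPFIX_CODING_UINT,
      "netscaler_app_name_app_id", "The id of a named entity" },
    /* ... one entry per input row ... */
    { 0, 0, -1, 0, NULL, NULL, }    /* terminator printed in the END block */
};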
diff --git a/lib/make-ipfix_def_netscaler_h.awk b/lib/make-ipfix_def_netscaler_h.awk new file mode 100755 index 0000000..0f3b87e --- /dev/null +++ b/lib/make-ipfix_def_netscaler_h.awk @@ -0,0 +1,38 @@ +#!/usr/bin/awk -f + +BEGIN { + FS = "," + i=0 + print "/*\n * NETSCALER IPFIX defines\n *\n * This is a generated file. Do not edit! \n *\n */\n#ifndef IPFIX_NETSCALER_DEF_H\n#define IPFIX_NETSCALER_DEF_H\n\n#define IPFIX_ENO_NETSCALER\t5951\n\n" +} + + +NF==0 { next } +/#/ { next } + + +match ($0, /\/\/|\*|\/\*/) { + split($0, aux, substr($0, RSTART, RLENGTH)) + print substr($0, RSTART, RLENGTH)" "aux[2] } + +/\*\// { + print "*/" +} + + +NF==6 { + print "#define "$2" \t "$1 + + gsub("_FT_", "_CN_") + var[i]="#define "$2" \t "$5 + i++ +} + +END { + printf "\n/*\n * column name definitions\n */" + while(i > 0) { + print var[i] + i-- + } + print "\n#endif\n" +} diff --git a/lib/make-ipfix_fields_netscaler_h.awk b/lib/make-ipfix_fields_netscaler_h.awk new file mode 100755 index 0000000..0c85f2c --- /dev/null +++ b/lib/make-ipfix_fields_netscaler_h.awk @@ -0,0 +1,29 @@ +#!/usr/bin/awk -f + +BEGIN { + FS = "," + print "/*\n * IPFIX structs, types and definitions\n *\n * This is a generated file. Do not edit! \n *\n */\n\n/*\n * ipfix information element list\n */\nipfix_field_type_t ipfix_ft_fokus[] = {" + +} + + +NF==0 { next } +/#/ { next } + + +match ($0, /\/\/|\*|\/\*/) { + split($0, aux, substr($0, RSTART, RLENGTH)) + print substr($0, RSTART, RLENGTH)" "aux[2] } + +/\*\// { + print "*/" +} + + +NF==6 { + print "\t{ IPFIX_ENO_NETSCALER, "$2", "$3", "$4", \n\t "$5", "$6" }," +} + +END { + print "\t{ 0, 0, -1, 0, NULL, NULL, }\n};" +} From 8e44c6f36eaa62fcb6445c4688b205ddbf96ef40 Mon Sep 17 00:00:00 2001 From: Cameron Kerr Date: Mon, 16 Feb 2015 15:48:11 +1300 Subject: [PATCH 04/48] Continuing to integrate and test; some IEs are not being mapped and get mapped as something like 5951_129 while others are fine. Also helps if you include the actual definitions. 
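For context on the collector.c hunk that follows: the generated tables only become visible to libipfix once they are registered after ipfix_init(), and an IE that is not registered evidently falls back to a generic <enterprise>_<field> label such as the 5951_129 mentioned above. A minimal sketch of that registration sequence, with error handling trimmed (the helper name register_vendor_ies is illustrative only; ipfix_reverse_fields_netscaler.h, presumably the "actual definitions" referred to, is only added to collector.c in the following patch):

#include "ipfix.h"
#include "ipfix_def_fokus.h"
#include "ipfix_fields_fokus.h"
#include "ipfix_def_netscaler.h"
#include "ipfix_fields_netscaler.h"
#include "ipfix_reverse_fields_netscaler.h"

static int register_vendor_ies(void)
{
    if ( ipfix_init() < 0 )
        return -1;      /* base IANA information elements */
    if ( ipfix_add_vendor_information_elements( ipfix_ft_fokus ) < 0 )
        return -1;      /* Fokus vendor IEs */
    if ( ipfix_add_vendor_information_elements( ipfix_ft_netscaler ) < 0 )
        return -1;      /* NetScaler IEs, enterprise number 5951 */
    if ( ipfix_add_vendor_information_elements( ipfix_reverse_ft_netscaler ) < 0 )
        return -1;      /* reverse (biflow) variants of the NetScaler IEs */
    return 0;
}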
--- collector/collector.c | 15 +++++++++++++-- lib/Makefile.in | 1 + lib/ipfix.c | 3 +++ lib/ipfix_NETSCALER_IEs.txt | 18 +++++++++--------- lib/make-ipfix_fields_netscaler_h.awk | 2 +- 5 files changed, 27 insertions(+), 12 deletions(-) diff --git a/collector/collector.c b/collector/collector.c index e8cf00e..d161b4c 100644 --- a/collector/collector.c +++ b/collector/collector.c @@ -42,6 +42,8 @@ #include "ipfix_col.h" #include "ipfix_def_fokus.h" #include "ipfix_fields_fokus.h" +#include "ipfix_def_netscaler.h" +#include "ipfix_fields_netscaler.h" #include "mlog.h" #include "mpoll.h" @@ -461,13 +463,22 @@ int main (int argc, char *argv[]) /** init ipfix lib */ if ( ipfix_init() <0 ) { - fprintf( stderr, "ipfix_init() failed: %s\n", strerror(errno) ); + fprintf( stderr, "ipfix_init() failed adding base types: %s\n", strerror(errno) ); exit(1); } if ( ipfix_add_vendor_information_elements( ipfix_ft_fokus ) <0 ) { - fprintf( stderr, "ipfix_add_ie() failed: %s\n", strerror(errno) ); + fprintf( stderr, "ipfix_add_ie() failed adding Fokus types: %s\n", strerror(errno) ); exit(1); } + if ( ipfix_add_vendor_information_elements( ipfix_ft_netscaler ) <0 ) { + fprintf( stderr, "ipfix_add_ie() failed adding Netscaler types: %s\n", strerror(errno) ); + exit(1); + } + if ( ipfix_add_vendor_information_elements( ipfix_reverse_ft_netscaler ) <0 ) { + fprintf( stderr, "ipfix_add_ie() failed adding Netscaler reverse types: %s\n", strerror(errno) ); + exit(1); + } + /** signal handler signal( SIGSEGV, sig_func ); diff --git a/lib/Makefile.in b/lib/Makefile.in index a4fc90b..18461a9 100644 --- a/lib/Makefile.in +++ b/lib/Makefile.in @@ -77,4 +77,5 @@ ipfix_%_netscaler.h: ipfix_NETSCALER_IEs.txt make-ipfix_%_netscaler_h.awk ipfix_reverse_%_netscaler.h: ipfix_%_netscaler.h make-ipfix_%_netscaler_h.awk sed -f make-reverse-IPFIX_FIELDS_H.sed-script-file $< > $@ + sed -i s,ipfix_ft_netscaler,ipfix_reverse_ft_netscaler, $@ diff --git a/lib/ipfix.c b/lib/ipfix.c index 96a8ca3..9818648 100644 --- a/lib/ipfix.c +++ b/lib/ipfix.c @@ -47,6 +47,9 @@ #include "ipfix.h" #include "ipfix_fields.h" #include "ipfix_reverse_fields.h" +#include "ipfix_def_netscaler.h" +#include "ipfix_fields_netscaler.h" +#include "ipfix_reverse_fields_netscaler.h" #ifdef SSLSUPPORT #include "ipfix_ssl.h" #endif diff --git a/lib/ipfix_NETSCALER_IEs.txt b/lib/ipfix_NETSCALER_IEs.txt index ef937b5..bab3723 100644 --- a/lib/ipfix_NETSCALER_IEs.txt +++ b/lib/ipfix_NETSCALER_IEs.txt @@ -18,12 +18,12 @@ 150, IPFIX_FT_NETSCALER_APP_NAME_INCARNATION_NUMBER, 4, IPFIX_CODING_UINT, "netscaler_app_name_incarnation_number", "Each named entity in the NetScaler is associated with an id. The name to id mapping is sent in the appname mapping template to the collector. Other records only contain the id of the entity and the corresponding name is stored by the collector and used when required. If a new entity gets added or an entity gets removed or modified, the incarnation number changes which indicates the collector to use the appname mapping record to update its database" 151, IPFIX_FT_NETSCALER_APP_NAME_APP_ID, 4, IPFIX_CODING_UINT, "netscaler_app_name_app_id", "The id of a named entity" 152, IPFIX_FT_NETSCALER_APP_NAME, 65535, IPFIX_CODING_STRING, "netscaler_app_name", "Name of the entity configured on Netscaler for which the name-to-id mapping is being sent in the current record" -153, IPFIX_FT_NETSCALER_HTTP_REQ_RCV_FB, 8, IPFIX_CODING_HEX, "netscaler_http_req_rcv_fb", "Timestamp when the first byte of request was received from client at the NetScaler. 
Uses an NTP format of date/time" -156, IPFIX_FT_NETSCALER_HTTP_REQ_FORW_FB, 8, IPFIX_CODING_HEX, "netscaler_http_req_forw_fb", "Timestamp when the first byte of request was forwarded to server from the NetScaler. Uses an NTP format of date/time" -157, IPFIX_FT_NETSCALER_HTTP_RES_RCV_FB, 8, IPFIX_CODING_HEX, "netscaler_http_res_rcv_fb", "Timestamp when the first byte of response was received from server at the NetScaler. Uses an NTP format of date/time" -158, IPFIX_FT_NETSCALER_HTTP_RES_FORW_FB, 8, IPFIX_CODING_HEX, "netscaler_http_res_forw_fb", "Timestamp when the first byte of response was forwarded to client from the NetScaler. Uses an NTP format of date/time" -159, IPFIX_FT_NETSCALER_HTTP_REQ_RCV_LB, 8, IPFIX_CODING_HEX, "netscaler_http_req_rcv_lb", "Timestamp when the last byte of request was received from client at the NetScaler. Uses an NTP format of date/time" -160, IPFIX_FT_NETSCALER_HTTP_REQ_FORW_LB, 8, IPFIX_CODING_HEX, "netscaler_http_req_forw_lb", "Uses an NTP format of date/time" +153, IPFIX_FT_NETSCALER_HTTP_REQ_RCV_FB, 8, IPFIX_CODING_NTP, "netscaler_http_req_rcv_fb", "Timestamp when the first byte of request was received from client at the NetScaler. Uses an NTP format of date/time" +156, IPFIX_FT_NETSCALER_HTTP_REQ_FORW_FB, 8, IPFIX_CODING_NTP, "netscaler_http_req_forw_fb", "Timestamp when the first byte of request was forwarded to server from the NetScaler. Uses an NTP format of date/time" +157, IPFIX_FT_NETSCALER_HTTP_RES_RCV_FB, 8, IPFIX_CODING_NTP, "netscaler_http_res_rcv_fb", "Timestamp when the first byte of response was received from server at the NetScaler. Uses an NTP format of date/time" +158, IPFIX_FT_NETSCALER_HTTP_RES_FORW_FB, 8, IPFIX_CODING_NTP, "netscaler_http_res_forw_fb", "Timestamp when the first byte of response was forwarded to client from the NetScaler. Uses an NTP format of date/time" +159, IPFIX_FT_NETSCALER_HTTP_REQ_RCV_LB, 8, IPFIX_CODING_NTP, "netscaler_http_req_rcv_lb", "Timestamp when the last byte of request was received from client at the NetScaler. Uses an NTP format of date/time" +160, IPFIX_FT_NETSCALER_HTTP_REQ_FORW_LB, 8, IPFIX_CODING_NTP, "netscaler_http_req_forw_lb", "Uses an NTP format of date/time" 161, IPFIX_FT_NETSCALER_MAIN_PAGE_ID, 4, IPFIX_CODING_UINT, "netscaler_main_page_id", "In a html page, the main page transaction is associated with all its embedded object transactions. Each such embedded object transaction record contains the transaction Id of the main page so that a parent link to the main transaction can be created. This is used in generating a waterfall chart model depicting the various timing information of the entire page loading." 162, IPFIX_FT_NETSCALER_MAIN_PAGE_COREID, 4, IPFIX_CODING_UINT, "netscaler_main_page_coreid", "The above transaction ID is unique within the process. Hence the exporting process ID of the main page is also required." 
163, IPFIX_FT_NETSCALER_HTTP_CLIENT_INTERACTION_START_TIME, 65535, IPFIX_CODING_STRING, "netscaler_http_client_interaction_start_time", "The timestamp when the page starts loading" @@ -31,8 +31,8 @@ 165, IPFIX_FT_NETSCALER_HTTP_CLIENT_RENDER_START_TIME, 65535, IPFIX_CODING_STRING, "netscaler_http_client_render_start_time", "The timestamp when page rendering begins" 167, IPFIX_FT_NETSCALER_APP_TEMPLATE_NAME, 65535, IPFIX_CODING_STRING, "netscaler_app_template_name", "Name of the template to which the current entity belongs (see netscalerAppTemplateID)" 168, IPFIX_FT_NETSCALER_HTTP_CLIENT_INTERACTION_END_TIME, 65535, IPFIX_CODING_STRING, "netscaler_http_client_interaction_end_time", "The NTP timestamp when the HTML page becomes interactive to the user" -169, IPFIX_FT_NETSCALER_HTTP_RES_RCV_LB, 8, IPFIX_CODING_HEX, "netscaler_http_res_rcv_lb", "Uses an NTP format of date/time" -170, IPFIX_FT_NETSCALER_HTTP_RES_FORW_LB, 8, IPFIX_CODING_HEX, "netscaler_http_res_forw_lb", "Uses an NTP format of date/time" +169, IPFIX_FT_NETSCALER_HTTP_RES_RCV_LB, 8, IPFIX_CODING_NTP, "netscaler_http_res_rcv_lb", "Uses an NTP format of date/time" +170, IPFIX_FT_NETSCALER_HTTP_RES_FORW_LB, 8, IPFIX_CODING_NTP, "netscaler_http_res_forw_lb", "Uses an NTP format of date/time" 171, IPFIX_FT_NETSCALER_APP_UNIT_NAME_APP_ID, 4, IPFIX_CODING_UINT, "netscaler_app_unit_name_app_id", "Netscaler uses application templates that groups a set of entities which can be exported and imported when needed. This Information Element exportes the ID of the application template to which the entity belongs." 172, IPFIX_FT_NETSCALER_DB_LOGIN_FLAGS, 4, IPFIX_CODING_UINT, "netscaler_db_login_flags", "SQL login flags" 173, IPFIX_FT_NETSCALER_DB_REQ_TYPE, 1, IPFIX_CODING_UINT, "netscaler_db_req_type", "The type of database request" @@ -57,7 +57,7 @@ 200, IPFIX_FT_NETSCALER_ICA_SESSION_GUID, 65535, IPFIX_CODING_BYTES, "netscaler_ica_session_guid", "This is a 16-byte ID that identifies an ICA session. This value is present in the ICA protocol header. With Excalibur actual session GUID will be present, which indicates a unique session established by a user. Pre-Excalibur, this is a random Value generated by Netscaler." 201, IPFIX_FT_NETSCALE_ICA_CLIENT_VERSION, 65535, IPFIX_CODING_STRING, "NETSCALE_ICA_CLIENT_VERSION", "Version of the ICA client" 202, IPFIX_FT_NETSCALER_ICA_CLIENT_TYPE, 2, IPFIX_CODING_UINT, "netscaler_ica_client_type", "Identifies the type of ICA client" -203, IPFIX_FT_NETSCALER_ICA_CLIENT_IP, 4, IPFIX_CODING_IP, "netscaler_ica_client_ip", "The ICA client IP as sent by the Citrix Receiver" +203, IPFIX_FT_NETSCALER_ICA_CLIENT_IP, 4, IPFIX_CODING_IPADDR, "netscaler_ica_client_ip", "The ICA client IP as sent by the Citrix Receiver" 204, IPFIX_FT_NETSCALER_ICA_CLIENT_HOSTNAME, 65535, IPFIX_CODING_STRING, "netscaler_ica_client_hostname", "name of the ICA client host" 205, IPFIX_FT_NETSCALER_AAA_USERNAME, 65535, IPFIX_CODING_STRING, "netscaler_aaa_username", "If the connection is over VPN, the AAA username for the session" 207, IPFIX_FT_NETSCALER_ICA_DOMAIN_NAME, 65535, IPFIX_CODING_STRING, "netscaler_ica_domain_name", "Domain of the ICA client" diff --git a/lib/make-ipfix_fields_netscaler_h.awk b/lib/make-ipfix_fields_netscaler_h.awk index 0c85f2c..eac6d27 100755 --- a/lib/make-ipfix_fields_netscaler_h.awk +++ b/lib/make-ipfix_fields_netscaler_h.awk @@ -2,7 +2,7 @@ BEGIN { FS = "," - print "/*\n * IPFIX structs, types and definitions\n *\n * This is a generated file. Do not edit! 
\n *\n */\n\n/*\n * ipfix information element list\n */\nipfix_field_type_t ipfix_ft_fokus[] = {" + print "/*\n * IPFIX structs, types and definitions\n *\n * This is a generated file. Do not edit! \n *\n */\n\n/*\n * ipfix information element list\n */\nipfix_field_type_t ipfix_ft_netscaler[] = {" } From 0339cbd93bac81d4243bb16a85fcd9b121c56e7a Mon Sep 17 00:00:00 2001 From: Cameron Kerr Date: Mon, 16 Feb 2015 16:54:14 +1300 Subject: [PATCH 05/48] Changed AWK script so it didn't use a comma to separate entries for building the IE data (the description fields themselves contain commas). Various IEs were not getting created for Netscaler. It now uses the pipe '|'. Looking good. Just need a more up-to-date IE description, as there are IEs seen that are above 269 (the highest currently known). --- collector/collector.c | 9 +- lib/ipfix_NETSCALER_IEs.txt | 244 +++++++++++++------------- lib/make-ipfix_def_netscaler_h.awk | 2 +- lib/make-ipfix_fields_netscaler_h.awk | 2 +- 4 files changed, 129 insertions(+), 128 deletions(-) diff --git a/collector/collector.c index d161b4c..f5797b3 100644 --- a/collector/collector.c +++ b/collector/collector.c @@ -44,6 +44,7 @@ #include "ipfix_fields_fokus.h" #include "ipfix_def_netscaler.h" #include "ipfix_fields_netscaler.h" +#include "ipfix_reverse_fields_netscaler.h" #include "mlog.h" #include "mpoll.h" @@ -463,19 +464,19 @@ int main (int argc, char *argv[]) /** init ipfix lib */ if ( ipfix_init() <0 ) { - fprintf( stderr, "ipfix_init() failed adding base types: %s\n", strerror(errno) ); + fprintf( stderr, "ipfix_init() failed: %s\n", strerror(errno) ); exit(1); } if ( ipfix_add_vendor_information_elements( ipfix_ft_fokus ) <0 ) { - fprintf( stderr, "ipfix_add_ie() failed adding Fokus types: %s\n", strerror(errno) ); + fprintf( stderr, "ipfix_add_vendor_information_elements() failed adding Fokus types: %s\n", strerror(errno) ); exit(1); } if ( ipfix_add_vendor_information_elements( ipfix_ft_netscaler ) <0 ) { - fprintf( stderr, "ipfix_add_ie() failed adding Netscaler types: %s\n", strerror(errno) ); + fprintf( stderr, "ipfix_add_vendor_information_elements() failed adding Netscaler types: %s\n", strerror(errno) ); exit(1); } if ( ipfix_add_vendor_information_elements( ipfix_reverse_ft_netscaler ) <0 ) { - fprintf( stderr, "ipfix_add_ie() failed adding Netscaler reverse types: %s\n", strerror(errno) ); + fprintf( stderr, "ipfix_add_vendor_information_elements() failed adding Netscaler reverse types: %s\n", strerror(errno) ); exit(1); } diff --git a/lib/ipfix_NETSCALER_IEs.txt index bab3723..503de98 100644 --- a/lib/ipfix_NETSCALER_IEs.txt +++ b/lib/ipfix_NETSCALER_IEs.txt @@ -1,122 +1,122 @@ -128, IPFIX_FT_NETSCALER_ROUND_TRIP_TIME, 4, IPFIX_CODING_UINT, "netscaler_round_trip_time", "The TCP RTT of the flow in milliseconds since the time last record was sent" -129, IPFIX_FT_NETSCALER_TRANSACTION_ID, 4, IPFIX_CODING_UINT, "netscaler_transaction_id", "At Layer-7, the four flows of a transaction between client and server (client-to-NS, NS-to-Server, Server-to-NS, NS-to-Client) are tied together using the transaction ID."
-130, IPFIX_FT_NETSCALER_HTTP_REQ_URL, 65535, IPFIX_CODING_STRING, "netscaler_http_req_url", "HTTP request URL" -131, IPFIX_FT_NETSCALER_HTTP_REQ_COOKIE, 65535, IPFIX_CODING_STRING, "netscaler_http_req_cookie", "Value of Cookie header present in HTTP request" -132, IPFIX_FT_NETSCALER_FLOW_FLAGS, 8, IPFIX_CODING_UINT, "netscaler_flow_flags", "application layer flags, for use between the exporter and collector to indicate various Layer-7 events and types like the direction of the flow (client-in, svc-out, etc.), http version, NetScaler cache served responses, SSL, compression, TCP buffering, and many more." -133, IPFIX_FT_NETSCALER_CONNECTION_ID, 4, IPFIX_CODING_UINT, "netscaler_connection_id", "The two flows of a TCP connection are tied together with a connection ID" -134, IPFIX_FT_NETSCALER_SYSLOG_PRIORITY, 1, IPFIX_CODING_UINT, "netscaler_syslog_priority", "Priority of the syslog message being logged" -135, IPFIX_FT_NETSCALER_SYSLOG_MESSAGE, 65535, IPFIX_CODING_STRING, "netscaler_syslog_message", "The syslog message generated on Netscaler" -136, IPFIX_FT_NETSCALER_SYSLOG_TIMESTAMP, 8, IPFIX_CODING_UINT, "netscaler_syslog_timestamp", "Timestamp when the syslog (contained in the syslog record) was generated Number of milliseconds since Unix epoch" -140, IPFIX_FT_NETSCALER_HTTP_REQ_REFERER, 65535, IPFIX_CODING_STRING, "netscaler_http_req_referer", "Value of Referer header present in HTTP request" -141, IPFIX_FT_NETSCALER_HTTP_REQ_METHOD, 65535, IPFIX_CODING_STRING, "netscaler_http_req_method", "The request method in HTTP request" -142, IPFIX_FT_NETSCALER_HTTP_REQ_HOST, 65535, IPFIX_CODING_STRING, "netscaler_http_req_host", "Value of Host header in HTTP request" -143, IPFIX_FT_NETSCALER_HTTP_REQ_USER_AGENT, 65535, IPFIX_CODING_STRING, "netscaler_http_req_user_agent", "The User Agent string as seen in HTTP request header" -144, IPFIX_FT_NETSCALER_HTTP_RSP_STATUS, 2, IPFIX_CODING_UINT, "netscaler_http_rsp_status", "Status of HTTP response" -145, IPFIX_FT_NETSCALER_HTTP_RSP_LEN, 8, IPFIX_CODING_UINT, "netscaler_http_rsp_len", "The total size of HTTP response" -146, IPFIX_FT_NETSCALER_SERVER_TTFB, 8, IPFIX_CODING_UINT, "netscaler_server_ttfb", "Time elapsed in microseconds between receiving of request from the client and receiving the first byte of response from server" -147, IPFIX_FT_NETSCALER_SERVER_TTLB, 8, IPFIX_CODING_UINT, "netscaler_server_ttlb", "Time elapsed in microseconds between receiving of request from the client and receiving the last byte of response from server" -150, IPFIX_FT_NETSCALER_APP_NAME_INCARNATION_NUMBER, 4, IPFIX_CODING_UINT, "netscaler_app_name_incarnation_number", "Each named entity in the NetScaler is associated with an id. The name to id mapping is sent in the appname mapping template to the collector. Other records only contain the id of the entity and the corresponding name is stored by the collector and used when required. 
If a new entity gets added or an entity gets removed or modified, the incarnation number changes which indicates the collector to use the appname mapping record to update its database" -151, IPFIX_FT_NETSCALER_APP_NAME_APP_ID, 4, IPFIX_CODING_UINT, "netscaler_app_name_app_id", "The id of a named entity" -152, IPFIX_FT_NETSCALER_APP_NAME, 65535, IPFIX_CODING_STRING, "netscaler_app_name", "Name of the entity configured on Netscaler for which the name-to-id mapping is being sent in the current record" -153, IPFIX_FT_NETSCALER_HTTP_REQ_RCV_FB, 8, IPFIX_CODING_NTP, "netscaler_http_req_rcv_fb", "Timestamp when the first byte of request was received from client at the NetScaler. Uses an NTP format of date/time" -156, IPFIX_FT_NETSCALER_HTTP_REQ_FORW_FB, 8, IPFIX_CODING_NTP, "netscaler_http_req_forw_fb", "Timestamp when the first byte of request was forwarded to server from the NetScaler. Uses an NTP format of date/time" -157, IPFIX_FT_NETSCALER_HTTP_RES_RCV_FB, 8, IPFIX_CODING_NTP, "netscaler_http_res_rcv_fb", "Timestamp when the first byte of response was received from server at the NetScaler. Uses an NTP format of date/time" -158, IPFIX_FT_NETSCALER_HTTP_RES_FORW_FB, 8, IPFIX_CODING_NTP, "netscaler_http_res_forw_fb", "Timestamp when the first byte of response was forwarded to client from the NetScaler. Uses an NTP format of date/time" -159, IPFIX_FT_NETSCALER_HTTP_REQ_RCV_LB, 8, IPFIX_CODING_NTP, "netscaler_http_req_rcv_lb", "Timestamp when the last byte of request was received from client at the NetScaler. Uses an NTP format of date/time" -160, IPFIX_FT_NETSCALER_HTTP_REQ_FORW_LB, 8, IPFIX_CODING_NTP, "netscaler_http_req_forw_lb", "Uses an NTP format of date/time" -161, IPFIX_FT_NETSCALER_MAIN_PAGE_ID, 4, IPFIX_CODING_UINT, "netscaler_main_page_id", "In a html page, the main page transaction is associated with all its embedded object transactions. Each such embedded object transaction record contains the transaction Id of the main page so that a parent link to the main transaction can be created. This is used in generating a waterfall chart model depicting the various timing information of the entire page loading." -162, IPFIX_FT_NETSCALER_MAIN_PAGE_COREID, 4, IPFIX_CODING_UINT, "netscaler_main_page_coreid", "The above transaction ID is unique within the process. Hence the exporting process ID of the main page is also required." 
-163, IPFIX_FT_NETSCALER_HTTP_CLIENT_INTERACTION_START_TIME, 65535, IPFIX_CODING_STRING, "netscaler_http_client_interaction_start_time", "The timestamp when the page starts loading" -164, IPFIX_FT_NETSCALER_HTTP_CLIENT_RENDER_END_TIME, 65535, IPFIX_CODING_STRING, "netscaler_http_client_render_end_time", "The timestamp when the page completely renders" -165, IPFIX_FT_NETSCALER_HTTP_CLIENT_RENDER_START_TIME, 65535, IPFIX_CODING_STRING, "netscaler_http_client_render_start_time", "The timestamp when page rendering begins" -167, IPFIX_FT_NETSCALER_APP_TEMPLATE_NAME, 65535, IPFIX_CODING_STRING, "netscaler_app_template_name", "Name of the template to which the current entity belongs (see netscalerAppTemplateID)" -168, IPFIX_FT_NETSCALER_HTTP_CLIENT_INTERACTION_END_TIME, 65535, IPFIX_CODING_STRING, "netscaler_http_client_interaction_end_time", "The NTP timestamp when the HTML page becomes interactive to the user" -169, IPFIX_FT_NETSCALER_HTTP_RES_RCV_LB, 8, IPFIX_CODING_NTP, "netscaler_http_res_rcv_lb", "Uses an NTP format of date/time" -170, IPFIX_FT_NETSCALER_HTTP_RES_FORW_LB, 8, IPFIX_CODING_NTP, "netscaler_http_res_forw_lb", "Uses an NTP format of date/time" -171, IPFIX_FT_NETSCALER_APP_UNIT_NAME_APP_ID, 4, IPFIX_CODING_UINT, "netscaler_app_unit_name_app_id", "Netscaler uses application templates that groups a set of entities which can be exported and imported when needed. This Information Element exportes the ID of the application template to which the entity belongs." -172, IPFIX_FT_NETSCALER_DB_LOGIN_FLAGS, 4, IPFIX_CODING_UINT, "netscaler_db_login_flags", "SQL login flags" -173, IPFIX_FT_NETSCALER_DB_REQ_TYPE, 1, IPFIX_CODING_UINT, "netscaler_db_req_type", "The type of database request" -174, IPFIX_FT_NETSCALER_DB_PROTOCOL_NAME, 1, IPFIX_CODING_UINT, "netscaler_db_protocol_name", "The database protocol being used" -175, IPFIX_FT_NETSCALER_DB_USER_NAME, 65535, IPFIX_CODING_STRING, "netscaler_db_user_name", "Database username" -176, IPFIX_FT_NETSCALER_DB_DATABASE_NAME, 65535, IPFIX_CODING_STRING, "netscaler_db_database_name", "DB database name" -177, IPFIX_FT_NETSCALER_DB_CLIENT_HOST_NAME, 65535, IPFIX_CODING_STRING, "netscaler_db_client_host_name", "DB client host name" -178, IPFIX_FT_NETSCALER_DB_REQ_STRING, 65535, IPFIX_CODING_STRING, "netscaler_db_req_string", "DB request string" -179, IPFIX_FT_NETSCALER_DB_RESP_STATUS_STRING, 65535, IPFIX_CODING_STRING, "netscaler_db_resp_status_string", "Status of the response as indicated in the Database response" -180, IPFIX_FT_NETSCALER_DB_RESP_STATUS, 8, IPFIX_CODING_UINT, "netscaler_db_resp_status", "SQL response status" -181, IPFIX_FT_NETSCALER_DB_RESP_LENGTH, 8, IPFIX_CODING_UINT, "netscaler_db_resp_length", "SQL response length" -182, IPFIX_FT_NETSCALER_CLIENT_RTT, 4, IPFIX_CODING_UINT, "netscaler_client_rtt", "The RTT of the client is exported in the server side records" -183, IPFIX_FT_NETSCALER_HTTP_CONTENT_TYPE, 65535, IPFIX_CODING_STRING, "netscaler_http_content_type", "The Content Type string as seen in the HTTP header" -185, IPFIX_FT_NETSCALER_HTTP_REQ_AUTHORIZATION, 65535, IPFIX_CODING_STRING, "netscaler_http_req_authorization", "Value of the Authorization HTTP header" -186, IPFIX_FT_NETSCALER_HTTP_REQ_VIA, 65535, IPFIX_CODING_STRING, "netscaler_http_req_via", "Value of the Via HTTP header" -187, IPFIX_FT_NETSCALER_HTTP_RES_LOCATION, 65535, IPFIX_CODING_STRING, "netscaler_http_res_location", "Value of the Location HTTP response header" -188, IPFIX_FT_NETSCALER_HTTP_RES_SET_COOKIE, 65535, IPFIX_CODING_STRING, 
"netscaler_http_res_set_cookie", "value of the Set-Cookie HTTP response header" -189, IPFIX_FT_NETSCALER_HTTP_RES_SET_COOKIE2, 65535, IPFIX_CODING_STRING, "netscaler_http_res_set_cookie2", "value of the Set-Cookie2 HTTP response header" -190, IPFIX_FT_NETSCALER_HTTP_REQ_X_FORWARDED_FOR, 65535, IPFIX_CODING_STRING, "netscaler_http_req_x_forwarded_for", "value of the X-Forwarded-For HTTP header" -192, IPFIX_FT_NETSCALER_CONNECTION_CHAIN_ID, 65535, IPFIX_CODING_BYTES, "netscaler_connection_chain_id", "This is a 16-byte ID that ties together all the TCP connections of ICA protocol, from client to the server that are terminated and established on layer-4 devices in the path. Since all these TCP connections belong to one logical connection from client to server, they will have the same connection chain ID." -193, IPFIX_FT_NETSCALER_CONNECTION_CHAIN_HOP_COUNT, 1, IPFIX_CODING_UINT, "netscaler_connection_chain_hop_count", "The hop count of the current device in the connection chain from client to server (see connection chain id for more details)" -200, IPFIX_FT_NETSCALER_ICA_SESSION_GUID, 65535, IPFIX_CODING_BYTES, "netscaler_ica_session_guid", "This is a 16-byte ID that identifies an ICA session. This value is present in the ICA protocol header. With Excalibur actual session GUID will be present, which indicates a unique session established by a user. Pre-Excalibur, this is a random Value generated by Netscaler." -201, IPFIX_FT_NETSCALE_ICA_CLIENT_VERSION, 65535, IPFIX_CODING_STRING, "NETSCALE_ICA_CLIENT_VERSION", "Version of the ICA client" -202, IPFIX_FT_NETSCALER_ICA_CLIENT_TYPE, 2, IPFIX_CODING_UINT, "netscaler_ica_client_type", "Identifies the type of ICA client" -203, IPFIX_FT_NETSCALER_ICA_CLIENT_IP, 4, IPFIX_CODING_IPADDR, "netscaler_ica_client_ip", "The ICA client IP as sent by the Citrix Receiver" -204, IPFIX_FT_NETSCALER_ICA_CLIENT_HOSTNAME, 65535, IPFIX_CODING_STRING, "netscaler_ica_client_hostname", "name of the ICA client host" -205, IPFIX_FT_NETSCALER_AAA_USERNAME, 65535, IPFIX_CODING_STRING, "netscaler_aaa_username", "If the connection is over VPN, the AAA username for the session" -207, IPFIX_FT_NETSCALER_ICA_DOMAIN_NAME, 65535, IPFIX_CODING_STRING, "netscaler_ica_domain_name", "Domain of the ICA client" -208, IPFIX_FT_NETSCALER_ICA_CLIENT_LAUNCHER, 2, IPFIX_CODING_UINT, "netscaler_ica_client_launcher", "Identifies the ICA launcher" -209, IPFIX_FT_NETSCALER_ICA_SESSION_SETUP_TIME, 4, IPFIX_CODING_UINT, "netscaler_ica_session_setup_time", "Number of seconds since Unix epoch (usually...?)" -210, IPFIX_FT_NETSCALER_ICA_SERVER_NAME, 65535, IPFIX_CODING_STRING, "netscaler_ica_server_name", "name of the ICA server" -214, IPFIX_FT_NETSCALER_ICA_SESSION_RECONNECTS, 1, IPFIX_CODING_UINT, "netscaler_ica_session_reconnects", "Number of times session reconnects happened" -215, IPFIX_FT_NETSCALER_ICA_RTT, 4, IPFIX_CODING_UINT, "netscaler_ica_rtt", "The ICA client sends a probe packet to the server, which sends back a response. Using this, the ICA process calculates the round trip time between the client and server which is exported to the appflow collector as ICA RTT." 
-216, IPFIX_FT_NETSCALER_ICA_CLIENT_SIDE_RX_BYTES, 4, IPFIX_CODING_UINT, "netscaler_ica_client_side_rx_bytes", "Number of bytes received on client ICA connection" -217, IPFIX_FT_NETSCALER_ICA_CLIENT_SIDE_TX_BYTES, 4, IPFIX_CODING_UINT, "netscaler_ica_client_side_tx_bytes", "Number of bytes transmitted on client ICA connection" -219, IPFIX_FT_NETSCALER_ICA_CLIENT_SIDE_PACKETS_RETRANSMIT, 2, IPFIX_CODING_UINT, "netscaler_ica_client_side_packets_retransmit", "Number of packets retransmitted on clientside connection" -220, IPFIX_FT_NETSCALER_ICA_SERVER_SIDE_PACKETS_RETRANSMIT, 2, IPFIX_CODING_UINT, "netscaler_ica_server_side_packets_retransmit", "Number of packets retransmitted on serverside connection" -221, IPFIX_FT_NETSCALER_ICA_CLIENT_SIDE_RTT, 4, IPFIX_CODING_UINT, "netscaler_ica_client_side_rtt", "The TCP rtt on the client ICA connection" -222, IPFIX_FT_NETSCALER_ICA_SERVER_SIDE_RTT, 4, IPFIX_CODING_UINT, "netscaler_ica_server_side_rtt", "The TCP rtt on the server ICA connection" -223, IPFIX_FT_NETSCALER_ICA_SESSION_UPDATE_BEGIN_SEC, 4, IPFIX_CODING_UINT, "netscaler_ica_session_update_begin_sec", "Absolute timestamp of end of ICA session update Number of seconds since Unix epoch (usually...?)" -224, IPFIX_FT_NETSCALER_ICA_SESSION_UPDATE_END_SEC, 4, IPFIX_CODING_UINT, "netscaler_ica_session_update_end_sec", "Absolute timestamp of beginning of ICA session update Number of seconds since Unix epoch (usually...?)" -225, IPFIX_FT_NETSCALER_ICA_CHANNEL_ID_1, 4, IPFIX_CODING_UINT, "netscaler_ica_channel_id_1", "The IDs of the ICA channels opened" -226, IPFIX_FT_NETSCALER_ICA_CHANNEL_ID_1_BYTES, 4, IPFIX_CODING_UINT, "netscaler_ica_channel_id_1_bytes", "The IDs of the ICA channels opened" -227, IPFIX_FT_NETSCALER_ICA_CHANNEL_ID_2, 4, IPFIX_CODING_UINT, "netscaler_ica_channel_id_2", "The IDs of the ICA channels opened" -228, IPFIX_FT_NETSCALER_ICA_CHANNEL_ID_2_BYTES, 4, IPFIX_CODING_UINT, "netscaler_ica_channel_id_2_bytes", "The IDs of the ICA channels opened" -229, IPFIX_FT_NETSCALER_ICA_CHANNEL_ID_3, 4, IPFIX_CODING_UINT, "netscaler_ica_channel_id_3", "The IDs of the ICA channels opened" -230, IPFIX_FT_NETSCALER_ICA_CHANNEL_ID_3_BYTES, 4, IPFIX_CODING_UINT, "netscaler_ica_channel_id_3_bytes", "The IDs of the ICA channels opened" -231, IPFIX_FT_NETSCALER_ICA_CHANNEL_ID_4, 4, IPFIX_CODING_UINT, "netscaler_ica_channel_id_4", "The IDs of the ICA channels opened" -232, IPFIX_FT_NETSCALER_ICA_CHANNEL_ID_4_BYTES, 4, IPFIX_CODING_UINT, "netscaler_ica_channel_id_4_bytes", "The IDs of the ICA channels opened" -233, IPFIX_FT_NETSCALER_ICA_CHANNEL_ID_5, 4, IPFIX_CODING_UINT, "netscaler_ica_channel_id_5", "The IDs of the ICA channels opened" -234, IPFIX_FT_NETSCALER_ICA_CHANNEL_ID_5_BYTES, 4, IPFIX_CODING_UINT, "netscaler_ica_channel_id_5_bytes", "The IDs of the ICA channels opened" -235, IPFIX_FT_NETSCALER_ICA_CONNECTION_PRIORITY, 2, IPFIX_CODING_UINT, "netscaler_ica_connection_priority", "Identifies the priority of ICA connection" -236, IPFIX_FT_NETSCALER_APPLICATION_STARTUP_DURATION, 4, IPFIX_CODING_UINT, "netscaler_application_startup_duration", "The time elapsed between the launch of an application and when it started running" -237, IPFIX_FT_NETSCALER_ICA_LAUNCH_MECHANISM, 2, IPFIX_CODING_UINT, "netscaler_ica_launch_mechanism", "The mechanism used to launch ICA applicaiton" -238, IPFIX_FT_NETSCALER_ICA_APPLICATION_NAME, 65535, IPFIX_CODING_STRING, "netscaler_ica_application_name", "ICA application name" -239, IPFIX_FT_NETSCALER_APPLICATION_STARTUP_TIME, 4, IPFIX_CODING_UINT, 
"netscaler_application_startup_time", "The time when an application started on the server Number of seconds since Unix epoch (usually...?)" -240, IPFIX_FT_NETSCALER_ICA_APPLICATION_TERMINATION_TYPE, 2, IPFIX_CODING_UINT, "netscaler_ica_application_termination_type", "Indicates how the application termination happened, eg: User closed the app, session termination, abort etc" -241, IPFIX_FT_NETSCALER_ICA_APPLICATION_TERMINATION_TIME, 4, IPFIX_CODING_UINT, "netscaler_ica_application_termination_time", "The time when the application was terminated Number of seconds since Unix epoch (usually...?)" -242, IPFIX_FT_NETSCALER_ICA_SESSION_END_TIME, 4, IPFIX_CODING_UINT, "netscaler_ica_session_end_time", "The time when the ICA session ended Number of seconds since Unix epoch (usually...?)" -243, IPFIX_FT_NETSCALER_ICA_CLIENT_SIDE_JITTER, 4, IPFIX_CODING_UINT, "netscaler_ica_client_side_jitter", "The variance of client side RTT w.r.t the calculated RTT" -244, IPFIX_FT_NETSCALER_ICA_SERVER_SIDE_JITTER, 4, IPFIX_CODING_UINT, "netscaler_ica_server_side_jitter", "The variance of server side RTT w.r.t the calculated RTT" -245, IPFIX_FT_NETSCALER_ICA_APP_PROCESS_ID, 4, IPFIX_CODING_UINT, "netscaler_ica_app_process_id", "The process ID of the application launched on the server" -246, IPFIX_FT_NETSCALER_ICA_APP_MODULE_PATH, 65535, IPFIX_CODING_STRING, "netscaler_ica_app_module_path", "path of the ICA application being launched" -247, IPFIX_FT_NETSCALER_ICA_DEVICE_SERIAL_NO, 4, IPFIX_CODING_UINT, "netscaler_ica_device_serial_no", "Used in conjunction with clientcookie to identify primary connection and tie up streams of a MSI connection" -248, IPFIX_FT_NETSCALER_MSI_CLIENT_COOKIE, 65535, IPFIX_CODING_BYTES, "netscaler_msi_client_cookie", "An identifier that helps to tie up multiple connections of the same session when Multi stream ICA is used. Should be same across all MSI connections" -249, IPFIX_FT_NETSCALER_ICA_FLAGS, 8, IPFIX_CODING_UINT, "netscaler_ica_flags", "ICA specific flags: 0x0004 - MSI Enabled on Session; 0x0008 - Secondary Connection; 0x0010 - Seamless session; 0x0020 - Compression disabled explicitly; 0x0040 - Global ICA GUID enabled; 0x0080 - EUEM channel supported" -250, IPFIX_FT_NETSCALER_ICA_USERNAME, 65535, IPFIX_CODING_STRING, "netscaler_ica_username", "Username for the ICA session" -251, IPFIX_FT_NETSCALER_LICENSE_TYPE, 1, IPFIX_CODING_UINT, "netscaler_license_type", "" -252, IPFIX_FT_NETSCALER_MAX_LICENSE_COUNT, 8, IPFIX_CODING_UINT, "netscaler_max_license_count", "" -252, IPFIX_FT_NETSCALER_MAX_LICENSE_COUNT, 8, IPFIX_CODING_UINT, "netscaler_max_license_count", "" -253, IPFIX_FT_NETSCALER_CURRENT_LICENSE_CONSUMED, 8, IPFIX_CODING_UINT, "netscaler_current_license_consumed", "" -254, IPFIX_FT_NETSCALER_ICA_NETWORK_UPDATE_START_TIME, 4, IPFIX_CODING_UINT, "netscaler_ica_network_update_start_time", "A network update record is sent at a defined interval that contains the ICA connection statistics for that interval. This Information Element contains the timestamp when the collection stats in this record began Number of seconds since Unix epoch (usually...?)" -255, IPFIX_FT_NETSCALER_ICA_NETWORK_UPDATE_END_TIME, 4, IPFIX_CODING_UINT, "netscaler_ica_network_update_end_time", "A network update record is sent at a defined interval that contains the ICA connection statistics for that interval. 
This Information Element contains the timestamp when the collection stats in this record ended Number of seconds since Unix epoch (usually...?)" -256, IPFIX_FT_NETSCALER_ICA_CLIENT_SIDE_SRTT, 4, IPFIX_CODING_UINT, "netscaler_ica_client_side_srtt", "RTT smoothed over the client side connection by considering one eighth of current RTT and seven eighth of the smoothed RTT since the beginning of the connection" -257, IPFIX_FT_NETSCALER_ICA_SERVER_SIDE_SRTT, 4, IPFIX_CODING_UINT, "netscaler_ica_server_side_srtt", "RTT smoothed over the server side connection by considering one eighth of current RTT and seven eighth of the smoothed RTT since the beginning of the connection" -258, IPFIX_FT_NETSCALER_ICA_CLIENT_SIDE_DELAY, 4, IPFIX_CODING_UINT, "netscaler_ica_client_side_delay", "Indicates time taken by Netscaler to process this client side packet (NS introduced processing delay)" -259, IPFIX_FT_NETSCALER_ICA_SERVER_SIDE_DELAY, 4, IPFIX_CODING_UINT, "netscaler_ica_server_side_delay", "Indicates time taken by Netscaler to process this server side packet (NS introduced processing delay)" -260, IPFIX_FT_NETSCALER_ICA_HOST_DELAY, 4, IPFIX_CODING_UINT, "netscaler_ica_host_delay", "Indicates a portion of the ICA RTT measurement - time delay introduced at the Host while processing the packet" -261, IPFIX_FT_NETSCALER_ICA_CLIENTSIDE_WINDOW_SIZE, 2, IPFIX_CODING_UINT, "netscaler_ica_clientside_window_size", "TCP window size on the client connection" -262, IPFIX_FT_NETSCALER_ICA_SERVER_SIDE_WINDOW_SIZE, 2, IPFIX_CODING_UINT, "netscaler_ica_server_side_window_size", "TCP window size on the server connection" -263, IPFIX_FT_NETSCALER_ICA_CLIENT_SIDE_RTO_COUNT, 2, IPFIX_CODING_UINT, "netscaler_ica_client_side_rto_count", "Number of times retransmission timeout occurred on client connection" -264, IPFIX_FT_NETSCALER_ICA_SERVER_SIDE_RTO_COUNT, 2, IPFIX_CODING_UINT, "netscaler_ica_server_side_rto_count", "Number of times retransmission timeout occurred on server connection" -265, IPFIX_FT_NETSCALER_ICA_L7_CLIENT_LATENCY, 4, IPFIX_CODING_UINT, "netscaler_ica_l7_client_latency", "L7 layer latency measured using ICA probes and responses sent between Receiver and the Host, on client side pcb." -266, IPFIX_FT_NETSCALER_ICA_L7_SERVER_LATENCY, 4, IPFIX_CODING_UINT, "netscaler_ica_l7_server_latency", "L7 layer latency measured using ICA probes and responses sent between Receiver and the Host, on server side pcb." -267, IPFIX_FT_NETSCALER_HTTP_DOMAIN_NAME, 65535, IPFIX_CODING_STRING, "netscaler_http_domain_name", "HTTP domain name" -268, IPFIX_FT_NETSCALER_CACHE_REDIR_CLIENT_CONNECTION_CORE_ID, 4, IPFIX_CODING_UINT, "netscaler_cache_redir_client_connection_core_id", "The client connection id is unique within a process. Hence the process id of the client connection is also passed to make the complete set unique." -269, IPFIX_FT_NETSCALER_CACHE_REDIR_CLIENT_CONNECTION_TRANSACTION_ID, 4, IPFIX_CODING_UINT, "netscaler_cache_redir_client_connection_transaction_id", "When a request hits CR vserver and is redirected to the cache server and a cache miss happens, the cache sends a the request to the origin server. This request mostly comes back to the NS. This ID is used to link the cache request with the actual client request on the collector." 
+269|IPFIX_FT_NETSCALER_CACHE_REDIR_CLIENT_CONNECTION_TRANSACTION_ID|4|IPFIX_CODING_UINT|"netscaler_cache_redir_client_connection_transaction_id"|"When a request hits CR vserver and is redirected to the cache server and a cache miss happens, the cache sends a the request to the origin server. This request mostly comes back to the NS. This ID is used to link the cache request with the actual client request on the collector." +268|IPFIX_FT_NETSCALER_CACHE_REDIR_CLIENT_CONNECTION_CORE_ID|4|IPFIX_CODING_UINT|"netscaler_cache_redir_client_connection_core_id"|"The client connection id is unique within a process. Hence the process id of the client connection is also passed to make the complete set unique." +267|IPFIX_FT_NETSCALER_HTTP_DOMAIN_NAME|65535|IPFIX_CODING_STRING|"netscaler_http_domain_name"|"HTTP domain name" +266|IPFIX_FT_NETSCALER_ICA_L7_SERVER_LATENCY|4|IPFIX_CODING_UINT|"netscaler_ica_l7_server_latency"|"L7 layer latency measured using ICA probes and responses sent between Receiver and the Host, on server side pcb." +265|IPFIX_FT_NETSCALER_ICA_L7_CLIENT_LATENCY|4|IPFIX_CODING_UINT|"netscaler_ica_l7_client_latency"|"L7 layer latency measured using ICA probes and responses sent between Receiver and the Host, on client side pcb." +264|IPFIX_FT_NETSCALER_ICA_SERVER_SIDE_RTO_COUNT|2|IPFIX_CODING_UINT|"netscaler_ica_server_side_rto_count"|"Number of times retransmission timeout occurred on server connection" +263|IPFIX_FT_NETSCALER_ICA_CLIENT_SIDE_RTO_COUNT|2|IPFIX_CODING_UINT|"netscaler_ica_client_side_rto_count"|"Number of times retransmission timeout occurred on client connection" +262|IPFIX_FT_NETSCALER_ICA_SERVER_SIDE_WINDOW_SIZE|2|IPFIX_CODING_UINT|"netscaler_ica_server_side_window_size"|"TCP window size on the server connection" +261|IPFIX_FT_NETSCALER_ICA_CLIENTSIDE_WINDOW_SIZE|2|IPFIX_CODING_UINT|"netscaler_ica_clientside_window_size"|"TCP window size on the client connection" +260|IPFIX_FT_NETSCALER_ICA_HOST_DELAY|4|IPFIX_CODING_UINT|"netscaler_ica_host_delay"|"Indicates a portion of the ICA RTT measurement - time delay introduced at the Host while processing the packet" +259|IPFIX_FT_NETSCALER_ICA_SERVER_SIDE_DELAY|4|IPFIX_CODING_UINT|"netscaler_ica_server_side_delay"|"Indicates time taken by Netscaler to process this server side packet (NS introduced processing delay)" +258|IPFIX_FT_NETSCALER_ICA_CLIENT_SIDE_DELAY|4|IPFIX_CODING_UINT|"netscaler_ica_client_side_delay"|"Indicates time taken by Netscaler to process this client side packet (NS introduced processing delay)" +257|IPFIX_FT_NETSCALER_ICA_SERVER_SIDE_SRTT|4|IPFIX_CODING_UINT|"netscaler_ica_server_side_srtt"|"RTT smoothed over the server side connection by considering one eighth of current RTT and seven eighth of the smoothed RTT since the beginning of the connection" +256|IPFIX_FT_NETSCALER_ICA_CLIENT_SIDE_SRTT|4|IPFIX_CODING_UINT|"netscaler_ica_client_side_srtt"|"RTT smoothed over the client side connection by considering one eighth of current RTT and seven eighth of the smoothed RTT since the beginning of the connection" +255|IPFIX_FT_NETSCALER_ICA_NETWORK_UPDATE_END_TIME|4|IPFIX_CODING_UINT|"netscaler_ica_network_update_end_time"|"A network update record is sent at a defined interval that contains the ICA connection statistics for that interval. 
This Information Element contains the timestamp when the collection stats in this record ended Number of seconds since Unix epoch (usually...?)" +254|IPFIX_FT_NETSCALER_ICA_NETWORK_UPDATE_START_TIME|4|IPFIX_CODING_UINT|"netscaler_ica_network_update_start_time"|"A network update record is sent at a defined interval that contains the ICA connection statistics for that interval. This Information Element contains the timestamp when the collection stats in this record began Number of seconds since Unix epoch (usually...?)" +253|IPFIX_FT_NETSCALER_CURRENT_LICENSE_CONSUMED|8|IPFIX_CODING_UINT|"netscaler_current_license_consumed"|"" +252|IPFIX_FT_NETSCALER_MAX_LICENSE_COUNT|8|IPFIX_CODING_UINT|"netscaler_max_license_count"|"" +252|IPFIX_FT_NETSCALER_MAX_LICENSE_COUNT|8|IPFIX_CODING_UINT|"netscaler_max_license_count"|"" +251|IPFIX_FT_NETSCALER_LICENSE_TYPE|1|IPFIX_CODING_UINT|"netscaler_license_type"|"" +250|IPFIX_FT_NETSCALER_ICA_USERNAME|65535|IPFIX_CODING_STRING|"netscaler_ica_username"|"Username for the ICA session" +249|IPFIX_FT_NETSCALER_ICA_FLAGS|8|IPFIX_CODING_UINT|"netscaler_ica_flags"|"ICA specific flags: 0x0004 - MSI Enabled on Session; 0x0008 - Secondary Connection; 0x0010 - Seamless session; 0x0020 - Compression disabled explicitly; 0x0040 - Global ICA GUID enabled; 0x0080 - EUEM channel supported" +248|IPFIX_FT_NETSCALER_MSI_CLIENT_COOKIE|65535|IPFIX_CODING_BYTES|"netscaler_msi_client_cookie"|"An identifier that helps to tie up multiple connections of the same session when Multi stream ICA is used. Should be same across all MSI connections" +247|IPFIX_FT_NETSCALER_ICA_DEVICE_SERIAL_NO|4|IPFIX_CODING_UINT|"netscaler_ica_device_serial_no"|"Used in conjunction with clientcookie to identify primary connection and tie up streams of a MSI connection" +246|IPFIX_FT_NETSCALER_ICA_APP_MODULE_PATH|65535|IPFIX_CODING_STRING|"netscaler_ica_app_module_path"|"path of the ICA application being launched" +245|IPFIX_FT_NETSCALER_ICA_APP_PROCESS_ID|4|IPFIX_CODING_UINT|"netscaler_ica_app_process_id"|"The process ID of the application launched on the server" +244|IPFIX_FT_NETSCALER_ICA_SERVER_SIDE_JITTER|4|IPFIX_CODING_UINT|"netscaler_ica_server_side_jitter"|"The variance of server side RTT w.r.t the calculated RTT" +243|IPFIX_FT_NETSCALER_ICA_CLIENT_SIDE_JITTER|4|IPFIX_CODING_UINT|"netscaler_ica_client_side_jitter"|"The variance of client side RTT w.r.t the calculated RTT" +242|IPFIX_FT_NETSCALER_ICA_SESSION_END_TIME|4|IPFIX_CODING_UINT|"netscaler_ica_session_end_time"|"The time when the ICA session ended Number of seconds since Unix epoch (usually...?)" +241|IPFIX_FT_NETSCALER_ICA_APPLICATION_TERMINATION_TIME|4|IPFIX_CODING_UINT|"netscaler_ica_application_termination_time"|"The time when the application was terminated Number of seconds since Unix epoch (usually...?)" +240|IPFIX_FT_NETSCALER_ICA_APPLICATION_TERMINATION_TYPE|2|IPFIX_CODING_UINT|"netscaler_ica_application_termination_type"|"Indicates how the application termination happened, eg: User closed the app, session termination, abort etc" +239|IPFIX_FT_NETSCALER_APPLICATION_STARTUP_TIME|4|IPFIX_CODING_UINT|"netscaler_application_startup_time"|"The time when an application started on the server Number of seconds since Unix epoch (usually...?)" +238|IPFIX_FT_NETSCALER_ICA_APPLICATION_NAME|65535|IPFIX_CODING_STRING|"netscaler_ica_application_name"|"ICA application name" +237|IPFIX_FT_NETSCALER_ICA_LAUNCH_MECHANISM|2|IPFIX_CODING_UINT|"netscaler_ica_launch_mechanism"|"The mechanism used to launch ICA applicaiton" 
+236|IPFIX_FT_NETSCALER_APPLICATION_STARTUP_DURATION|4|IPFIX_CODING_UINT|"netscaler_application_startup_duration"|"The time elapsed between the launch of an application and when it started running" +235|IPFIX_FT_NETSCALER_ICA_CONNECTION_PRIORITY|2|IPFIX_CODING_UINT|"netscaler_ica_connection_priority"|"Identifies the priority of ICA connection" +234|IPFIX_FT_NETSCALER_ICA_CHANNEL_ID_5_BYTES|4|IPFIX_CODING_UINT|"netscaler_ica_channel_id_5_bytes"|"The IDs of the ICA channels opened" +233|IPFIX_FT_NETSCALER_ICA_CHANNEL_ID_5|4|IPFIX_CODING_UINT|"netscaler_ica_channel_id_5"|"The IDs of the ICA channels opened" +232|IPFIX_FT_NETSCALER_ICA_CHANNEL_ID_4_BYTES|4|IPFIX_CODING_UINT|"netscaler_ica_channel_id_4_bytes"|"The IDs of the ICA channels opened" +231|IPFIX_FT_NETSCALER_ICA_CHANNEL_ID_4|4|IPFIX_CODING_UINT|"netscaler_ica_channel_id_4"|"The IDs of the ICA channels opened" +230|IPFIX_FT_NETSCALER_ICA_CHANNEL_ID_3_BYTES|4|IPFIX_CODING_UINT|"netscaler_ica_channel_id_3_bytes"|"The IDs of the ICA channels opened" +229|IPFIX_FT_NETSCALER_ICA_CHANNEL_ID_3|4|IPFIX_CODING_UINT|"netscaler_ica_channel_id_3"|"The IDs of the ICA channels opened" +228|IPFIX_FT_NETSCALER_ICA_CHANNEL_ID_2_BYTES|4|IPFIX_CODING_UINT|"netscaler_ica_channel_id_2_bytes"|"The IDs of the ICA channels opened" +227|IPFIX_FT_NETSCALER_ICA_CHANNEL_ID_2|4|IPFIX_CODING_UINT|"netscaler_ica_channel_id_2"|"The IDs of the ICA channels opened" +226|IPFIX_FT_NETSCALER_ICA_CHANNEL_ID_1_BYTES|4|IPFIX_CODING_UINT|"netscaler_ica_channel_id_1_bytes"|"The IDs of the ICA channels opened" +225|IPFIX_FT_NETSCALER_ICA_CHANNEL_ID_1|4|IPFIX_CODING_UINT|"netscaler_ica_channel_id_1"|"The IDs of the ICA channels opened" +224|IPFIX_FT_NETSCALER_ICA_SESSION_UPDATE_END_SEC|4|IPFIX_CODING_UINT|"netscaler_ica_session_update_end_sec"|"Absolute timestamp of beginning of ICA session update Number of seconds since Unix epoch (usually...?)" +223|IPFIX_FT_NETSCALER_ICA_SESSION_UPDATE_BEGIN_SEC|4|IPFIX_CODING_UINT|"netscaler_ica_session_update_begin_sec"|"Absolute timestamp of end of ICA session update Number of seconds since Unix epoch (usually...?)" +222|IPFIX_FT_NETSCALER_ICA_SERVER_SIDE_RTT|4|IPFIX_CODING_UINT|"netscaler_ica_server_side_rtt"|"The TCP rtt on the server ICA connection" +221|IPFIX_FT_NETSCALER_ICA_CLIENT_SIDE_RTT|4|IPFIX_CODING_UINT|"netscaler_ica_client_side_rtt"|"The TCP rtt on the client ICA connection" +220|IPFIX_FT_NETSCALER_ICA_SERVER_SIDE_PACKETS_RETRANSMIT|2|IPFIX_CODING_UINT|"netscaler_ica_server_side_packets_retransmit"|"Number of packets retransmitted on serverside connection" +219|IPFIX_FT_NETSCALER_ICA_CLIENT_SIDE_PACKETS_RETRANSMIT|2|IPFIX_CODING_UINT|"netscaler_ica_client_side_packets_retransmit"|"Number of packets retransmitted on clientside connection" +217|IPFIX_FT_NETSCALER_ICA_CLIENT_SIDE_TX_BYTES|4|IPFIX_CODING_UINT|"netscaler_ica_client_side_tx_bytes"|"Number of bytes transmitted on client ICA connection" +216|IPFIX_FT_NETSCALER_ICA_CLIENT_SIDE_RX_BYTES|4|IPFIX_CODING_UINT|"netscaler_ica_client_side_rx_bytes"|"Number of bytes received on client ICA connection" +215|IPFIX_FT_NETSCALER_ICA_RTT|4|IPFIX_CODING_UINT|"netscaler_ica_rtt"|"The ICA client sends a probe packet to the server, which sends back a response. Using this, the ICA process calculates the round trip time between the client and server which is exported to the appflow collector as ICA RTT." 
+214|IPFIX_FT_NETSCALER_ICA_SESSION_RECONNECTS|1|IPFIX_CODING_UINT|"netscaler_ica_session_reconnects"|"Number of times session reconnects happened" +210|IPFIX_FT_NETSCALER_ICA_SERVER_NAME|65535|IPFIX_CODING_STRING|"netscaler_ica_server_name"|"name of the ICA server" +209|IPFIX_FT_NETSCALER_ICA_SESSION_SETUP_TIME|4|IPFIX_CODING_UINT|"netscaler_ica_session_setup_time"|"Number of seconds since Unix epoch (usually...?)" +208|IPFIX_FT_NETSCALER_ICA_CLIENT_LAUNCHER|2|IPFIX_CODING_UINT|"netscaler_ica_client_launcher"|"Identifies the ICA launcher" +207|IPFIX_FT_NETSCALER_ICA_DOMAIN_NAME|65535|IPFIX_CODING_STRING|"netscaler_ica_domain_name"|"Domain of the ICA client" +205|IPFIX_FT_NETSCALER_AAA_USERNAME|65535|IPFIX_CODING_STRING|"netscaler_aaa_username"|"If the connection is over VPN, the AAA username for the session" +204|IPFIX_FT_NETSCALER_ICA_CLIENT_HOSTNAME|65535|IPFIX_CODING_STRING|"netscaler_ica_client_hostname"|"name of the ICA client host" +203|IPFIX_FT_NETSCALER_ICA_CLIENT_IP|4|IPFIX_CODING_IPADDR|"netscaler_ica_client_ip"|"The ICA client IP as sent by the Citrix Receiver" +202|IPFIX_FT_NETSCALER_ICA_CLIENT_TYPE|2|IPFIX_CODING_UINT|"netscaler_ica_client_type"|"Identifies the type of ICA client" +201|IPFIX_FT_NETSCALE_ICA_CLIENT_VERSION|65535|IPFIX_CODING_STRING|"NETSCALE_ICA_CLIENT_VERSION"|"Version of the ICA client" +200|IPFIX_FT_NETSCALER_ICA_SESSION_GUID|65535|IPFIX_CODING_BYTES|"netscaler_ica_session_guid"|"This is a 16-byte ID that identifies an ICA session. This value is present in the ICA protocol header. With Excalibur actual session GUID will be present, which indicates a unique session established by a user. Pre-Excalibur, this is a random Value generated by Netscaler." +193|IPFIX_FT_NETSCALER_CONNECTION_CHAIN_HOP_COUNT|1|IPFIX_CODING_UINT|"netscaler_connection_chain_hop_count"|"The hop count of the current device in the connection chain from client to server (see connection chain id for more details)" +192|IPFIX_FT_NETSCALER_CONNECTION_CHAIN_ID|65535|IPFIX_CODING_BYTES|"netscaler_connection_chain_id"|"This is a 16-byte ID that ties together all the TCP connections of ICA protocol, from client to the server that are terminated and established on layer-4 devices in the path. Since all these TCP connections belong to one logical connection from client to server, they will have the same connection chain ID." 
+190|IPFIX_FT_NETSCALER_HTTP_REQ_X_FORWARDED_FOR|65535|IPFIX_CODING_STRING|"netscaler_http_req_x_forwarded_for"|"value of the X-Forwarded-For HTTP header" +189|IPFIX_FT_NETSCALER_HTTP_RES_SET_COOKIE2|65535|IPFIX_CODING_STRING|"netscaler_http_res_set_cookie2"|"value of the Set-Cookie2 HTTP response header" +188|IPFIX_FT_NETSCALER_HTTP_RES_SET_COOKIE|65535|IPFIX_CODING_STRING|"netscaler_http_res_set_cookie"|"value of the Set-Cookie HTTP response header" +187|IPFIX_FT_NETSCALER_HTTP_RES_LOCATION|65535|IPFIX_CODING_STRING|"netscaler_http_res_location"|"Value of the Location HTTP response header" +186|IPFIX_FT_NETSCALER_HTTP_REQ_VIA|65535|IPFIX_CODING_STRING|"netscaler_http_req_via"|"Value of the Via HTTP header" +185|IPFIX_FT_NETSCALER_HTTP_REQ_AUTHORIZATION|65535|IPFIX_CODING_STRING|"netscaler_http_req_authorization"|"Value of the Authorization HTTP header" +183|IPFIX_FT_NETSCALER_HTTP_CONTENT_TYPE|65535|IPFIX_CODING_STRING|"netscaler_http_content_type"|"The Content Type string as seen in the HTTP header" +182|IPFIX_FT_NETSCALER_CLIENT_RTT|4|IPFIX_CODING_UINT|"netscaler_client_rtt"|"The RTT of the client is exported in the server side records" +181|IPFIX_FT_NETSCALER_DB_RESP_LENGTH|8|IPFIX_CODING_UINT|"netscaler_db_resp_length"|"SQL response length" +180|IPFIX_FT_NETSCALER_DB_RESP_STATUS|8|IPFIX_CODING_UINT|"netscaler_db_resp_status"|"SQL response status" +179|IPFIX_FT_NETSCALER_DB_RESP_STATUS_STRING|65535|IPFIX_CODING_STRING|"netscaler_db_resp_status_string"|"Status of the response as indicated in the Database response" +178|IPFIX_FT_NETSCALER_DB_REQ_STRING|65535|IPFIX_CODING_STRING|"netscaler_db_req_string"|"DB request string" +177|IPFIX_FT_NETSCALER_DB_CLIENT_HOST_NAME|65535|IPFIX_CODING_STRING|"netscaler_db_client_host_name"|"DB client host name" +176|IPFIX_FT_NETSCALER_DB_DATABASE_NAME|65535|IPFIX_CODING_STRING|"netscaler_db_database_name"|"DB database name" +175|IPFIX_FT_NETSCALER_DB_USER_NAME|65535|IPFIX_CODING_STRING|"netscaler_db_user_name"|"Database username" +174|IPFIX_FT_NETSCALER_DB_PROTOCOL_NAME|1|IPFIX_CODING_UINT|"netscaler_db_protocol_name"|"The database protocol being used" +173|IPFIX_FT_NETSCALER_DB_REQ_TYPE|1|IPFIX_CODING_UINT|"netscaler_db_req_type"|"The type of database request" +172|IPFIX_FT_NETSCALER_DB_LOGIN_FLAGS|4|IPFIX_CODING_UINT|"netscaler_db_login_flags"|"SQL login flags" +171|IPFIX_FT_NETSCALER_APP_UNIT_NAME_APP_ID|4|IPFIX_CODING_UINT|"netscaler_app_unit_name_app_id"|"Netscaler uses application templates that groups a set of entities which can be exported and imported when needed. This Information Element exportes the ID of the application template to which the entity belongs." 
+170|IPFIX_FT_NETSCALER_HTTP_RES_FORW_LB|8|IPFIX_CODING_NTP|"netscaler_http_res_forw_lb"|"Uses an NTP format of date/time" +169|IPFIX_FT_NETSCALER_HTTP_RES_RCV_LB|8|IPFIX_CODING_NTP|"netscaler_http_res_rcv_lb"|"Uses an NTP format of date/time" +168|IPFIX_FT_NETSCALER_HTTP_CLIENT_INTERACTION_END_TIME|65535|IPFIX_CODING_STRING|"netscaler_http_client_interaction_end_time"|"The NTP timestamp when the HTML page becomes interactive to the user" +167|IPFIX_FT_NETSCALER_APP_TEMPLATE_NAME|65535|IPFIX_CODING_STRING|"netscaler_app_template_name"|"Name of the template to which the current entity belongs (see netscalerAppTemplateID)" +165|IPFIX_FT_NETSCALER_HTTP_CLIENT_RENDER_START_TIME|65535|IPFIX_CODING_STRING|"netscaler_http_client_render_start_time"|"The timestamp when page rendering begins" +164|IPFIX_FT_NETSCALER_HTTP_CLIENT_RENDER_END_TIME|65535|IPFIX_CODING_STRING|"netscaler_http_client_render_end_time"|"The timestamp when the page completely renders" +163|IPFIX_FT_NETSCALER_HTTP_CLIENT_INTERACTION_START_TIME|65535|IPFIX_CODING_STRING|"netscaler_http_client_interaction_start_time"|"The timestamp when the page starts loading" +162|IPFIX_FT_NETSCALER_MAIN_PAGE_COREID|4|IPFIX_CODING_UINT|"netscaler_main_page_coreid"|"The above transaction ID is unique within the process. Hence the exporting process ID of the main page is also required." +161|IPFIX_FT_NETSCALER_MAIN_PAGE_ID|4|IPFIX_CODING_UINT|"netscaler_main_page_id"|"In a html page, the main page transaction is associated with all its embedded object transactions. Each such embedded object transaction record contains the transaction Id of the main page so that a parent link to the main transaction can be created. This is used in generating a waterfall chart model depicting the various timing information of the entire page loading." +160|IPFIX_FT_NETSCALER_HTTP_REQ_FORW_LB|8|IPFIX_CODING_NTP|"netscaler_http_req_forw_lb"|"Uses an NTP format of date/time" +159|IPFIX_FT_NETSCALER_HTTP_REQ_RCV_LB|8|IPFIX_CODING_NTP|"netscaler_http_req_rcv_lb"|"Timestamp when the last byte of request was received from client at the NetScaler. Uses an NTP format of date/time" +158|IPFIX_FT_NETSCALER_HTTP_RES_FORW_FB|8|IPFIX_CODING_NTP|"netscaler_http_res_forw_fb"|"Timestamp when the first byte of response was forwarded to client from the NetScaler. Uses an NTP format of date/time" +157|IPFIX_FT_NETSCALER_HTTP_RES_RCV_FB|8|IPFIX_CODING_NTP|"netscaler_http_res_rcv_fb"|"Timestamp when the first byte of response was received from server at the NetScaler. Uses an NTP format of date/time" +156|IPFIX_FT_NETSCALER_HTTP_REQ_FORW_FB|8|IPFIX_CODING_NTP|"netscaler_http_req_forw_fb"|"Timestamp when the first byte of request was forwarded to server from the NetScaler. Uses an NTP format of date/time" +153|IPFIX_FT_NETSCALER_HTTP_REQ_RCV_FB|8|IPFIX_CODING_NTP|"netscaler_http_req_rcv_fb"|"Timestamp when the first byte of request was received from client at the NetScaler. Uses an NTP format of date/time" +152|IPFIX_FT_NETSCALER_APP_NAME|65535|IPFIX_CODING_STRING|"netscaler_app_name"|"Name of the entity configured on Netscaler for which the name-to-id mapping is being sent in the current record" +151|IPFIX_FT_NETSCALER_APP_NAME_APP_ID|4|IPFIX_CODING_UINT|"netscaler_app_name_app_id"|"The id of a named entity" +150|IPFIX_FT_NETSCALER_APP_NAME_INCARNATION_NUMBER|4|IPFIX_CODING_UINT|"netscaler_app_name_incarnation_number"|"Each named entity in the NetScaler is associated with an id. The name to id mapping is sent in the appname mapping template to the collector. 
Other records only contain the id of the entity and the corresponding name is stored by the collector and used when required. If a new entity gets added or an entity gets removed or modified, the incarnation number changes which indicates the collector to use the appname mapping record to update its database" +147|IPFIX_FT_NETSCALER_SERVER_TTLB|8|IPFIX_CODING_UINT|"netscaler_server_ttlb"|"Time elapsed in microseconds between receiving of request from the client and receiving the last byte of response from server" +146|IPFIX_FT_NETSCALER_SERVER_TTFB|8|IPFIX_CODING_UINT|"netscaler_server_ttfb"|"Time elapsed in microseconds between receiving of request from the client and receiving the first byte of response from server" +145|IPFIX_FT_NETSCALER_HTTP_RSP_LEN|8|IPFIX_CODING_UINT|"netscaler_http_rsp_len"|"The total size of HTTP response" +144|IPFIX_FT_NETSCALER_HTTP_RSP_STATUS|2|IPFIX_CODING_UINT|"netscaler_http_rsp_status"|"Status of HTTP response" +143|IPFIX_FT_NETSCALER_HTTP_REQ_USER_AGENT|65535|IPFIX_CODING_STRING|"netscaler_http_req_user_agent"|"The User Agent string as seen in HTTP request header" +142|IPFIX_FT_NETSCALER_HTTP_REQ_HOST|65535|IPFIX_CODING_STRING|"netscaler_http_req_host"|"Value of Host header in HTTP request" +141|IPFIX_FT_NETSCALER_HTTP_REQ_METHOD|65535|IPFIX_CODING_STRING|"netscaler_http_req_method"|"The request method in HTTP request" +140|IPFIX_FT_NETSCALER_HTTP_REQ_REFERER|65535|IPFIX_CODING_STRING|"netscaler_http_req_referer"|"Value of Referer header present in HTTP request" +136|IPFIX_FT_NETSCALER_SYSLOG_TIMESTAMP|8|IPFIX_CODING_UINT|"netscaler_syslog_timestamp"|"Timestamp when the syslog (contained in the syslog record) was generated Number of milliseconds since Unix epoch" +135|IPFIX_FT_NETSCALER_SYSLOG_MESSAGE|65535|IPFIX_CODING_STRING|"netscaler_syslog_message"|"The syslog message generated on Netscaler" +134|IPFIX_FT_NETSCALER_SYSLOG_PRIORITY|1|IPFIX_CODING_UINT|"netscaler_syslog_priority"|"Priority of the syslog message being logged" +133|IPFIX_FT_NETSCALER_CONNECTION_ID|4|IPFIX_CODING_UINT|"netscaler_connection_id"|"The two flows of a TCP connection are tied together with a connection ID" +132|IPFIX_FT_NETSCALER_FLOW_FLAGS|8|IPFIX_CODING_UINT|"netscaler_flow_flags"|"application layer flags, for use between the exporter and collector to indicate various Layer-7 events and types like the direction of the flow (client-in, svc-out, etc.), http version, NetScaler cache served responses, SSL, compression, TCP buffering, and many more." +131|IPFIX_FT_NETSCALER_HTTP_REQ_COOKIE|65535|IPFIX_CODING_STRING|"netscaler_http_req_cookie"|"Value of Cookie header present in HTTP request" +130|IPFIX_FT_NETSCALER_HTTP_REQ_URL|65535|IPFIX_CODING_STRING|"netscaler_http_req_url"|"HTTP request URL" +129|IPFIX_FT_NETSCALER_TRANSACTION_ID|4|IPFIX_CODING_UINT|"netscaler_transaction_id"|"At Layer-7, the four flows of a transaction between client and server (client-to-NS, NS-to-Server, Server-to-NS, NS-to-Client) are tied together using the transaction ID." +128|IPFIX_FT_NETSCALER_ROUND_TRIP_TIME|4|IPFIX_CODING_UINT|"netscaler_round_trip_time"|"The TCP RTT of the flow in milliseconds since the time last record was sent" diff --git a/lib/make-ipfix_def_netscaler_h.awk b/lib/make-ipfix_def_netscaler_h.awk index 0f3b87e..8cc9d0f 100755 --- a/lib/make-ipfix_def_netscaler_h.awk +++ b/lib/make-ipfix_def_netscaler_h.awk @@ -1,7 +1,7 @@ #!/usr/bin/awk -f BEGIN { - FS = "," + FS = "|" i=0 print "/*\n * NETSCALER IPFIX defines\n *\n * This is a generated file. Do not edit! 
\n *\n */\n#ifndef IPFIX_NETSCALER_DEF_H\n#define IPFIX_NETSCALER_DEF_H\n\n#define IPFIX_ENO_NETSCALER\t5951\n\n" } diff --git a/lib/make-ipfix_fields_netscaler_h.awk b/lib/make-ipfix_fields_netscaler_h.awk index eac6d27..e6f0a36 100755 --- a/lib/make-ipfix_fields_netscaler_h.awk +++ b/lib/make-ipfix_fields_netscaler_h.awk @@ -1,7 +1,7 @@ #!/usr/bin/awk -f BEGIN { - FS = "," + FS = "|" print "/*\n * IPFIX structs, types and definitions\n *\n * This is a generated file. Do not edit! \n *\n */\n\n/*\n * ipfix information element list\n */\nipfix_field_type_t ipfix_ft_netscaler[] = {" } From 01f52bd848462dc563cac04b223fc0712f16ae44 Mon Sep 17 00:00:00 2001 From: Cameron Kerr Date: Mon, 16 Feb 2015 22:01:25 +1300 Subject: [PATCH 06/48] Change linking order to avoid segfault when my_inet_ntoa is called. Symptom was that strmov from -lmysqlclient was giving a segfault immediately on call, even though it was my_inet_ntoa that was called. --- .gitignore | 1 + collector/Makefile.in | 2 +- lib/Makefile.in | 4 ++-- 3 files changed, 4 insertions(+), 3 deletions(-) diff --git a/.gitignore b/.gitignore index 5a045e7..f0ed3b6 100644 --- a/.gitignore +++ b/.gitignore @@ -2,6 +2,7 @@ Makefile config.status collector/ipfix_collector examples/example_collector +examples/example_collector_db examples/example_exporter lib/ipfix_def_fokus.h lib/ipfix_fields_fokus.h diff --git a/collector/Makefile.in b/collector/Makefile.in index a70cd79..602ca52 100644 --- a/collector/Makefile.in +++ b/collector/Makefile.in @@ -36,7 +36,7 @@ install_sh = @install_sh@ DEFS = @DEFS@ CPPFLAGS = @CPPFLAGS@ LDFLAGS = @LDFLAGS@ -LIBS = @LIBS@ @MYSQLLIBS@ @SCTPLIBS@ @SSLLIBS@ -L../lib -L../libmisc -lipfix -lmisc +LIBS = @LIBS@ -L../lib -L../libmisc -lipfix -lmisc @MYSQLLIBS@ @SCTPLIBS@ @SSLLIBS@ CCOPT = -Wall -g INCLS = -I. -I.. -I../lib -I../libmisc CFLAGS = $(CCOPT) $(INCLS) $(DEFS) diff --git a/lib/Makefile.in b/lib/Makefile.in index 18461a9..3be62a7 100644 --- a/lib/Makefile.in +++ b/lib/Makefile.in @@ -35,8 +35,8 @@ OPENSSL = @OPENSSL@ DEFS = @DEFS@ CPPFLAGS = @CPPFLAGS@ -LDFLAGS = @LDFLAGS@ -L../libmisc -LIBS = @LIBS@ @SSLLIBS@ @MYSQLLIBS@ -lmisc +LDFLAGS = -L../libmisc @LDFLAGS@ +LIBS = @LIBS@ -lmisc @SSLLIBS@ @MYSQLLIBS@ CCOPT = -Wall -g INCLS = -I. -I.. -I../libmisc CFLAGS = $(CCOPT) $(INCLS) $(DEFS) From bf1f0a90d31e58df86be104d928bd4f8e07bffeb Mon Sep 17 00:00:00 2001 From: Cameron Kerr Date: Mon, 16 Feb 2015 23:24:56 +1300 Subject: [PATCH 07/48] Ignore paddingOctet (210 == 0xD2) when creating table columns, to prevent trying to create a table with duplicate columns. Similarly, don't try to populate such a column.
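paddingOctets (IANA Information Element 210, enterprise number 0) may legitimately appear more than once in a single template, and since column names are derived from the IE name, keeping it would produce a CREATE TABLE with duplicate columns and an INSERT that sets the same column twice. The same eno/ftype test ends up repeated in ipfix_db.c and ipfix_col_db.c; a small shared helper along the following lines would keep the magic number in one place. This is only a sketch, and the helper name is hypothetical rather than something this patch introduces:

    /* Hypothetical helper, not part of this patch: true for the IANA
     * paddingOctets Information Element (enterprise 0, type 210 == 0xD2),
     * which must not become a database column. */
    static int is_padding_octets( int eno, int ftype )
    {
        return (eno == 0) && (ftype == 0xD2);
    }

Each loop in the hunks below could then call is_padding_octets() on the field's eno and ftype and continue when it returns true.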
--- lib/ipfix_col_db.c | 4 ++++ lib/ipfix_db.c | 7 +++++++ 2 files changed, 11 insertions(+) diff --git a/lib/ipfix_col_db.c b/lib/ipfix_col_db.c index a1ec764..658ce50 100644 --- a/lib/ipfix_col_db.c +++ b/lib/ipfix_col_db.c @@ -167,6 +167,10 @@ int ipfix_export_drecord_db( ipfixs_node_t *s, t->tablename, IPFIX_DB_DT_MSGID, s->last_msgid ); nbytes = strlen(query); for ( i=0; iipfixt->nfields; i++ ) { + if ( t->ipfixt->fields[i].elem->ft->eno == 0 + && t->ipfixt->fields[i].elem->ft->ftype == 0xD2 ) { + continue; /* D2 == 210, paddingOctets */ + } #ifdef IENAME_COLUMNS nbytes += snprintf( query+nbytes, sizeof(query)-nbytes, ", %s='", t->ipfixt->fields[i].elem->ft->name ); diff --git a/lib/ipfix_db.c b/lib/ipfix_db.c index e802950..6dd8767 100644 --- a/lib/ipfix_db.c +++ b/lib/ipfix_db.c @@ -240,7 +240,14 @@ int ipfix_db_create_table( MYSQL *mysql, char *tablename, ipfix_template_t *t ) snprintf( query, MAXQUERYLEN, "CREATE TABLE %s ( %s INT UNSIGNED NOT NULL", tablename, IPFIX_DB_DT_MSGID ); + for ( i=0; infields; i++ ) { + + if ( t->fields[i].elem->ft->eno == 0 + && t->fields[i].elem->ft->ftype == 0xD2 ) { + continue; /* D2 == 210, paddingOctets */ + } + if ( ipfix_db_get_columnname( t->fields[i].elem->ft->eno, t->fields[i].elem->ft->ftype, tmpbuf, sizeof(tmpbuf)) <0 ) { From d747dde040775048eae2e381cd6d6446093ce81d Mon Sep 17 00:00:00 2001 From: Cameron Kerr Date: Mon, 16 Feb 2015 23:53:04 +1300 Subject: [PATCH 08/48] Resolved issue #2 "[ipfix_decode_trecord] warning: ipfix_get_template_ident() failed" --- lib/ipfix_col.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/ipfix_col.h b/lib/ipfix_col.h index a0f593b..b9bfc19 100644 --- a/lib/ipfix_col.h +++ b/lib/ipfix_col.h @@ -29,7 +29,7 @@ extern "C" { #define DFLT_MYSQL_USER "ipfix" #define DFLT_MYSQL_PASSWORD "ipfix" -#define MAXTEMPLIDENT 120 +#define MAXTEMPLIDENT 240 typedef enum { IPFIX_INPUT_FILE, IPFIX_INPUT_IPCON From 8b051e4ddb5f85f959f6a7c0adb615f89bac2561 Mon Sep 17 00:00:00 2001 From: Cameron Kerr Date: Wed, 18 Feb 2015 10:07:05 +1300 Subject: [PATCH 09/48] Prevent 'reverse ' prefix from creating invalid syntax when IENAME_COLUMNS is in use. --- lib/ipfix_col_db.c | 10 +++++----- lib/ipfix_db.c | 40 ++++++++++++++++++++-------------------- 2 files changed, 25 insertions(+), 25 deletions(-) diff --git a/lib/ipfix_col_db.c b/lib/ipfix_col_db.c index 658ce50..6e70823 100644 --- a/lib/ipfix_col_db.c +++ b/lib/ipfix_col_db.c @@ -84,7 +84,7 @@ int ipfix_export_newsrc_db( ipfixs_node_t *s, void *arg ) if ( exporter_id == 0 ) { snprintf( query, MAXQUERYLEN, - "INSERT INTO %s SET %s='%u', %s='%s', %s='-'", + "INSERT INTO `%s` SET `%s`='%u', `%s`='%s', `%s`='-'", IPFIX_DB_EXPORTERS, IPFIX_DB_EXP_ODID, s->odid, IPFIX_DB_EXP_ADDR, ipfix_col_input_get_ident( s->input ), IPFIX_DB_EXP_DESCR ); @@ -106,7 +106,7 @@ int ipfix_export_newmsg_db( ipfixs_node_t *s, ipfix_hdr_t *hdr, void *arg ) if ( data->mysql ) { snprintf( query, MAXQUERYLEN, - "INSERT INTO %s SET %s='%u', %s='%lu'", + "INSERT INTO `%s` SET `%s`='%u', `%s`='%lu'", IPFIX_DB_MESSAGETABLE, IPFIX_DB_MSGT_EXPID, s->exporterid, IPFIX_DB_MSGT_TIME, (hdr->version==IPFIX_VERSION_NF9)? 
@@ -163,7 +163,7 @@ int ipfix_export_drecord_db( ipfixs_node_t *s, * todo: log error if query buffer is too small */ snprintf( query, MAXQUERYLEN, - "INSERT INTO %s SET %s='%u'", + "INSERT INTO `%s` SET `%s`='%u'", t->tablename, IPFIX_DB_DT_MSGID, s->last_msgid ); nbytes = strlen(query); for ( i=0; iipfixt->nfields; i++ ) { @@ -172,7 +172,7 @@ int ipfix_export_drecord_db( ipfixs_node_t *s, continue; /* D2 == 210, paddingOctets */ } #ifdef IENAME_COLUMNS - nbytes += snprintf( query+nbytes, sizeof(query)-nbytes, ", %s='", + nbytes += snprintf( query+nbytes, sizeof(query)-nbytes, ", `%s`='", t->ipfixt->fields[i].elem->ft->name ); #else nbytes += snprintf( query+nbytes, sizeof(query)-nbytes, @@ -211,7 +211,7 @@ int ipfix_export_drecord_db( ipfixs_node_t *s, */ if ( t->message_snr != s->last_message_snr ) { snprintf( query, MAXQUERYLEN, - "INSERT INTO %s SET %s=%u, %s=%u ", + "INSERT INTO `%s` SET `%s`=%u, `%s`=%u ", IPFIX_DB_MAPPINGTABLE, IPFIX_DB_MT_MSGID, s->last_msgid, IPFIX_DB_MT_TMPLID, t->template_id ); mlogf( 4, "[%s] query: %s\n", func, query ); diff --git a/lib/ipfix_db.c b/lib/ipfix_db.c index 6dd8767..62f5850 100644 --- a/lib/ipfix_db.c +++ b/lib/ipfix_db.c @@ -125,9 +125,9 @@ int ipfix_db_open( MYSQL **mysqlp, /* check if ipfix exporters table exists */ snprintf( sql, MAXQUERYLEN, - "CREATE TABLE %s ( " - " %s INT NOT NULL AUTO_INCREMENT, %s INT UNSIGNED NOT NULL," - " %s BLOB NOT NULL, %s BLOB, PRIMARY KEY (%s) ) ", + "CREATE TABLE `%s` ( " + " `%s` INT NOT NULL AUTO_INCREMENT, `%s` INT UNSIGNED NOT NULL," + " `%s` BLOB NOT NULL, `%s` BLOB, PRIMARY KEY (`%s`) ) ", IPFIX_DB_EXPORTERS, IPFIX_DB_EXP_ID, IPFIX_DB_EXP_ODID, IPFIX_DB_EXP_ADDR, IPFIX_DB_EXP_DESCR, IPFIX_DB_EXP_ID ); @@ -141,9 +141,9 @@ int ipfix_db_open( MYSQL **mysqlp, /* check if ipfix message table exists */ snprintf( sql, MAXQUERYLEN, - "CREATE TABLE %s ( " - " %s INT NOT NULL AUTO_INCREMENT, " - " %s INT NOT NULL, %s INT NOT NULL, PRIMARY KEY (%s) ) ", + "CREATE TABLE `%s` ( " + " `%s` INT NOT NULL AUTO_INCREMENT, " + " `%s` INT NOT NULL, `%s` INT NOT NULL, PRIMARY KEY (`%s`) ) ", IPFIX_DB_MESSAGETABLE, IPFIX_DB_MSGT_ID, IPFIX_DB_MSGT_EXPID, IPFIX_DB_MSGT_TIME, IPFIX_DB_MSGT_ID ); @@ -157,9 +157,9 @@ int ipfix_db_open( MYSQL **mysqlp, /** check if ipfix templates table exists */ snprintf( sql, MAXQUERYLEN, - "CREATE TABLE %s ( " - " %s INT NOT NULL AUTO_INCREMENT, " - " %s BLOB, %s BLOB, PRIMARY KEY (%s) ) ", + "CREATE TABLE `%s` ( " + " `%s` INT NOT NULL AUTO_INCREMENT, " + " `%s` BLOB, `%s` BLOB, PRIMARY KEY (`%s`) ) ", IPFIX_DB_TEMPLATETABLE, IPFIX_DB_TMPL_ID, IPFIX_DB_TMPL_IDENT, IPFIX_DB_TMPL_TABLENAME, IPFIX_DB_TMPL_ID ); @@ -173,8 +173,8 @@ int ipfix_db_open( MYSQL **mysqlp, /** check if mapping table exists */ snprintf( sql, MAXQUERYLEN, - "CREATE TABLE %s ( " - " %s INT NOT NULL, %s INT NOT NULL, INDEX(%s) ) ", + "CREATE TABLE `%s` ( " + " `%s` INT NOT NULL, `%s` INT NOT NULL, INDEX(`%s`) ) ", IPFIX_DB_MAPPINGTABLE, IPFIX_DB_MT_MSGID, IPFIX_DB_MT_TMPLID, IPFIX_DB_MT_MSGID ); @@ -238,7 +238,7 @@ int ipfix_db_create_table( MYSQL *mysql, char *tablename, ipfix_template_t *t ) /** build query */ snprintf( query, MAXQUERYLEN, - "CREATE TABLE %s ( %s INT UNSIGNED NOT NULL", + "CREATE TABLE `%s` ( `%s` INT UNSIGNED NOT NULL", tablename, IPFIX_DB_DT_MSGID ); for ( i=0; infields; i++ ) { @@ -257,26 +257,26 @@ int ipfix_db_create_table( MYSQL *mysql, char *tablename, ipfix_template_t *t ) switch( t->fields[i].elem->ft->coding ) { case IPFIX_CODING_INT: snprintf( query+strlen(query), MAXQUERYLEN-strlen(query), - ", %s 
%sINT ", tmpbuf, + ", `%s` %sINT ", tmpbuf, (t->fields[i].elem->ft->length>4)?"BIG":"" ); break; case IPFIX_CODING_UINT: snprintf( query+strlen(query), MAXQUERYLEN-strlen(query), - ", %s %sINT UNSIGNED ", tmpbuf, + ", `%s` %sINT UNSIGNED ", tmpbuf, (t->fields[i].elem->ft->length>4)?"BIG":"" ); break; case IPFIX_CODING_STRING: snprintf( query+strlen(query), MAXQUERYLEN-strlen(query), - ", %s TEXT ", tmpbuf ); + ", `%s` TEXT ", tmpbuf ); break; case IPFIX_CODING_BYTES: snprintf( query+strlen(query), MAXQUERYLEN-strlen(query), - ", %s VARBINARY(%lu) ", tmpbuf, + ", `%s` VARBINARY(%lu) ", tmpbuf, (t->fields[i].elem->ft->lengthfields[i].elem->ft->length):(unsigned long)MAXBINARYIELEN); break; default: snprintf( query+strlen(query), MAXQUERYLEN-strlen(query), - ", %s VARBINARY(%d) ", tmpbuf, MAXBINARYIELEN ); + ", `%s` VARBINARY(%d) ", tmpbuf, MAXBINARYIELEN ); break; } } @@ -411,7 +411,7 @@ int ipfix_db_get_tablename( MYSQL *mysql, char *tablename, size_t tablenamelen, */ mysql_free_result(result); snprintf( query, MAXQUERYLEN, - "INSERT INTO %s SET %s='x', %s='x' ", + "INSERT INTO `%s` SET `%s`='x', `%s`='x' ", IPFIX_DB_TEMPLATETABLE, IPFIX_DB_TMPL_IDENT, IPFIX_DB_TMPL_TABLENAME ); @@ -437,7 +437,7 @@ int ipfix_db_get_tablename( MYSQL *mysql, char *tablename, size_t tablenamelen, /** update entry */ snprintf( query, MAXQUERYLEN, - "UPDATE %s SET %s='%s', %s='%s' WHERE %s='%d' ", + "UPDATE `%s` SET `%s`='%s', `%s`='%s' WHERE `%s`='%d' ", IPFIX_DB_TEMPLATETABLE, IPFIX_DB_TMPL_IDENT, ident, IPFIX_DB_TMPL_TABLENAME, tablename, IPFIX_DB_TMPL_ID, id ); if ( mysql_query( mysql, query ) !=0 ) { @@ -500,7 +500,7 @@ int ipfix_db_get_columnname( int eno, int type, char *buf, size_t buflen ) #ifdef IENAME_COLUMNS ipfix_field_t *elem; - if ( (elem=ipfix_get_ftinfo( int eno, int type )) !=NULL ) { + if ( (elem=ipfix_get_ftinfo(eno, type )) !=NULL ) { snprintf( buf, buflen, "%s", elem->ft->name ); return 0; } From 0412c8448b7e38c5b41791065146d706104584fd Mon Sep 17 00:00:00 2001 From: Cameron Kerr Date: Wed, 18 Feb 2015 10:10:37 +1300 Subject: [PATCH 10/48] Resolve issue: Remove "reverse " prefix for generated attribute names. #6 --- lib/make-reverse-IPFIX_FIELDS_H.sed-script-file | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/make-reverse-IPFIX_FIELDS_H.sed-script-file b/lib/make-reverse-IPFIX_FIELDS_H.sed-script-file index ada232d..ae3ac5c 100644 --- a/lib/make-reverse-IPFIX_FIELDS_H.sed-script-file +++ b/lib/make-reverse-IPFIX_FIELDS_H.sed-script-file @@ -1,4 +1,4 @@ s/ipfix_field_types/ipfix_reverse_field_types/g s/{ 0, /{ REV_PEN, /g -s/"\(.[^"]*\)"/"reverse \1"/g +#s/"\(.[^"]*\)"/"reverse \1"/g #s/IPFIX_FT_/IPFIX_FT_REVERSE/g From 5ab7a4f5494eb5c7539b524521d3a953ae6b6b1e Mon Sep 17 00:00:00 2001 From: Cameron Kerr Date: Wed, 18 Feb 2015 11:52:43 +1300 Subject: [PATCH 11/48] Add --jsonfile argument, but it doesn't really do anything yet. 
--- collector/collector.c | 14 +++++++++++++- 1 file changed, 13 insertions(+), 1 deletion(-) diff --git a/collector/collector.c b/collector/collector.c index f5797b3..9f1cfc8 100644 --- a/collector/collector.c +++ b/collector/collector.c @@ -70,6 +70,7 @@ typedef struct ipfix_collector_opts char *dbpw; /* db password */ char *dbname; /* db name */ char *dbhost; /* hostname */ + char *jsonfile; /* filename */ int udp; /* support udp clients */ int tcp; /* support tcp packets */ @@ -120,6 +121,7 @@ static void usage( char *taskname) " --dbname db name\n" " --dbuser db user\n" " --dbpw db password\n" + " --jsonfile use db only for templates; send data as JSON lines\n" #else " -d export into database\n" #endif @@ -303,6 +305,7 @@ int main (int argc, char *argv[]) { "cafile", 1, 0, 0}, { "cadir", 1, 0, 0}, { "help", 0, 0, 0}, + { "jsonfile", 1, 0, 0}, { 0, 0, 0, 0 } }; #endif @@ -327,6 +330,7 @@ int main (int argc, char *argv[]) par.dbname = DFLT_MYSQL_DBNAME; par.dbuser = DFLT_MYSQL_USER; par.dbpw = DFLT_MYSQL_PASSWORD; + par.jsonfile = NULL; snprintf( par.progname, sizeof(par.progname), "%s", basename( argv[0]) ); @@ -372,9 +376,12 @@ int main (int argc, char *argv[]) case 9: /* cadir */ par.cadir = optarg; break; - case 10: + case 10: /* help */ usage(par.progname); exit(1); + case 11: /* jsonfile */ + par.jsonfile = optarg; + break; } break; @@ -461,6 +468,11 @@ int main (int argc, char *argv[]) par.progname, par.port, par.dbexport?"database":par.datadir?"files":"stdout" ); + if ( par.dbexport && par.jsonfile ) { + mlogf(1, "[%s] templates go to database, data goes to file %s as one JSON document per line\n", + par.progname, par.jsonfile); + } + /** init ipfix lib */ if ( ipfix_init() <0 ) { From 7b21bfb61f7252d2fce2bef7ee55a908833744ad Mon Sep 17 00:00:00 2001 From: Cameron Kerr Date: Wed, 18 Feb 2015 12:00:17 +1300 Subject: [PATCH 12/48] Adding optional jsonfile argument to ipfix_col_init_mysqlexport (may be NULL if not used). But it doesn't do anything yet. 
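For orientation, the two intended call styles after this change look roughly like the lines below; the host, user and password literals are placeholders, and only the final argument matters (NULL keeps the existing database-only behaviour, a filename such as data.json opts in to the JSON-lines output added in later commits):

    /* Illustrative calls only; credential strings are placeholders. */
    ipfix_col_init_mysqlexport( "localhost", "ipfix", "ipfix", "ipfix", NULL );
    ipfix_col_init_mysqlexport( "localhost", "ipfix", "ipfix", "ipfix", "data.json" );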
--- collector/collector.c | 2 +- examples/example_collector_db.c | 4 ++-- lib/ipfix_col.h | 2 +- lib/ipfix_col_db.c | 3 ++- 4 files changed, 6 insertions(+), 5 deletions(-) diff --git a/collector/collector.c b/collector/collector.c index 9f1cfc8..ff8f451 100644 --- a/collector/collector.c +++ b/collector/collector.c @@ -209,7 +209,7 @@ int do_collect() #ifdef DBSUPPORT if ( par.dbexport ) { if ( ipfix_col_init_mysqlexport( par.dbhost, par.dbuser, - par.dbpw, par.dbname ) <0 ) { + par.dbpw, par.dbname, par.jsonfile ) <0 ) { mlogf( 0, "[%s] cannot connect to database\n", par.progname ); return -1; } diff --git a/examples/example_collector_db.c b/examples/example_collector_db.c index f3b2745..bcd7c1d 100644 --- a/examples/example_collector_db.c +++ b/examples/example_collector_db.c @@ -152,10 +152,10 @@ int main (int argc, char *argv[]) exit(1); } - /** activate database export + /** activate database export (jsonfile is not used) */ if ( ipfix_col_init_mysqlexport( dbhost, dbuser, - dbpw, dbname ) <0 ) { + dbpw, dbname, NULL ) <0 ) { fprintf( stderr, "cannot connect to database\n" ); ipfix_cleanup(); exit(1); diff --git a/lib/ipfix_col.h b/lib/ipfix_col.h index b9bfc19..767448f 100644 --- a/lib/ipfix_col.h +++ b/lib/ipfix_col.h @@ -109,7 +109,7 @@ typedef void* ipfix_col_t; void ipfix_col_init( void ); int ipfix_col_init_fileexport( char *datadir ); void ipfix_col_stop_fileexport( void ); -int ipfix_col_init_mysqlexport( char *host, char *user, char *pw, char *name ); +int ipfix_col_init_mysqlexport( char *host, char *user, char *pw, char *name, char *opt_jsonfile ); void ipfix_col_stop_mysqlexport( void ); int ipfix_col_register_export( ipfix_col_info_t *colinfo ); int ipfix_col_cancel_export( ipfix_col_info_t *colinfo ); diff --git a/lib/ipfix_col_db.c b/lib/ipfix_col_db.c index 6e70823..a992578 100644 --- a/lib/ipfix_col_db.c +++ b/lib/ipfix_col_db.c @@ -264,7 +264,8 @@ void ipfix_export_cleanup_db( void *arg ) /*----- export funcs -----------------------------------------------------*/ int ipfix_col_init_mysqlexport( char *dbhost, char *dbuser, - char *dbpw, char *dbname ) + char *dbpw, char *dbname, + char *opt_jsonfile ) { #ifdef DBSUPPORT void *data; From 08b23890b3b1b75f082fa333ace31cacc2b4917a Mon Sep 17 00:00:00 2001 From: Cameron Kerr Date: Wed, 18 Feb 2015 12:24:30 +1300 Subject: [PATCH 13/48] Plumbing in --jsonfile argument. Should be ready to write the JSON emission now.
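The shape being aimed for is one self-contained JSON document per line, keyed by the IE names from the template, roughly like the following (an invented record shown only to illustrate the format; the template id and all field values are made up):

    {"ipfix_template_id":"258", "netscaler_round_trip_time":12, "netscaler_http_req_method":"GET", "netscaler_http_req_url":"/index.html", "netscaler_http_req_rcv_fb":"2015-02-19T01:15:58.938Z"}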
--- lib/ipfix_col_db.c | 9 +++++++-- lib/ipfix_col_db.h | 4 +++- 2 files changed, 10 insertions(+), 3 deletions(-) diff --git a/lib/ipfix_col_db.c b/lib/ipfix_col_db.c index a992578..50a6e88 100644 --- a/lib/ipfix_col_db.c +++ b/lib/ipfix_col_db.c @@ -43,6 +43,7 @@ typedef struct ipfix_export_data_db { MYSQL *mysql; + char *json_filename; } ipfixe_data_db_t; #endif @@ -233,7 +234,9 @@ int ipfix_export_drecord_db( ipfixs_node_t *s, } int ipfix_export_init_db( char *dbhost, char *dbuser, - char *dbpw, char *dbname, void **arg ) + char *dbpw, char *dbname, + char *opt_jsonfile, + void **arg ) { ipfixe_data_db_t *data; @@ -245,6 +248,8 @@ int ipfix_export_init_db( char *dbhost, char *dbuser, return -1; } + data->json_filename = opt_jsonfile; + *arg = (void**)data; return 0; } @@ -270,7 +275,7 @@ int ipfix_col_init_mysqlexport( char *dbhost, char *dbuser, #ifdef DBSUPPORT void *data; - if ( ipfix_export_init_db( dbhost, dbuser, dbpw, dbname, &data ) <0 ) { + if ( ipfix_export_init_db( dbhost, dbuser, dbpw, dbname, opt_jsonfile, &data ) <0 ) { return -1; } diff --git a/lib/ipfix_col_db.h b/lib/ipfix_col_db.h index 77632a2..59be923 100644 --- a/lib/ipfix_col_db.h +++ b/lib/ipfix_col_db.h @@ -23,7 +23,9 @@ int ipfix_export_drecord_db( ipfixs_node_t *s, ipfixt_node_t *t, ipfix_datarecord_t *d, void *arg ); void ipfix_export_cleanup_db( void *arg ); int ipfix_export_init_db( char *dbhost, char *dbuser, - char *dbpw, char *dbname, void **data ); + char *dbpw, char *dbname, + char *opt_jsonfile, + void **data ); #ifdef __cplusplus } From bf9c7b24d808fdf3601621c40ee18dd1816246c2 Mon Sep 17 00:00:00 2001 From: Cameron Kerr Date: Wed, 18 Feb 2015 13:19:37 +1300 Subject: [PATCH 14/48] Prevent compiler warnings of implicitly declared functions --- libmisc/mlog.h | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) diff --git a/libmisc/mlog.h b/libmisc/mlog.h index d0c6658..de55119 100644 --- a/libmisc/mlog.h +++ b/libmisc/mlog.h @@ -15,14 +15,14 @@ extern "C" { #endif -void errorf ( char fmt[], ... ) __attribute__ ((format (printf, 1, 2))); -void debugf ( char fmt[], ... ) __attribute__ ((format (printf, 1, 2))); -void mlogf ( int verbosity, +extern void errorf ( char fmt[], ... ) __attribute__ ((format (printf, 1, 2))); +extern void debugf ( char fmt[], ... ) __attribute__ ((format (printf, 1, 2))); +extern void mlogf ( int verbosity, char fmt[], ... ) __attribute__ ((format (printf, 2, 3))); -int mlog_open ( char *logfile, char *prefix ); -void mlog_close ( void ); -void mlog_set_vlevel( int vlevel ); -int mlog_get_vlevel(); +extern int mlog_open ( char *logfile, char *prefix ); +extern void mlog_close ( void ); +extern void mlog_set_vlevel( int vlevel ); +extern int mlog_get_vlevel(); #ifdef __cplusplus } From 65914dd531cb7071f25c323ee4332c2299264813 Mon Sep 17 00:00:00 2001 From: Cameron Kerr Date: Wed, 18 Feb 2015 13:40:44 +1300 Subject: [PATCH 15/48] Add include for mlog.h to stop use of implicitly defined function --- lib/ipfix_db.c | 1 + 1 file changed, 1 insertion(+) diff --git a/lib/ipfix_db.c b/lib/ipfix_db.c index 62f5850..c34fe53 100644 --- a/lib/ipfix_db.c +++ b/lib/ipfix_db.c @@ -20,6 +20,7 @@ #include #include +#include "mlog.h" #include "misc.h" #include "ipfix_db.h" From cf7d2bd108ded926e87a0b7b1b9068743522e67f Mon Sep 17 00:00:00 2001 From: Cameron Kerr Date: Wed, 18 Feb 2015 22:56:47 +1300 Subject: [PATCH 16/48] JSON emmission now working for (u)ints, strings and bytes Note that 64-bit will need to change to a string representation perhaps. 
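The concern with 64-bit values is that JSON itself does not bound numbers, but common consumers (JavaScript, and document stores that map numbers onto IEEE-754 doubles) are only exact up to 2^53, so large counters can be silently rounded. A hedged sketch of the string alternative, written in the style of json_out.c but not part of this patch (the function name is hypothetical):

    #include <inttypes.h>
    #include <stdio.h>

    /* Emit a 64-bit counter as a JSON string so double-based consumers
     * cannot round it; the 8-byte UINT case would call this instead of
     * printing a bare number. */
    static void json_render_uint64_as_string_to_FILE( FILE *out, uint64_t value )
    {
        fprintf( out, "\"%" PRIu64 "\"", value );
    }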
IP (4&6) addresses and NTP timestamps are the largest omissions yet. --- lib/Makefile.in | 4 +- lib/ipfix_col_db.c | 117 +++++++++++++++++++++++++++++++++++++++++++++ lib/ipfix_col_db.h | 2 + lib/json_out.c | 90 ++++++++++++++++++++++++++++++++++ lib/json_out.h | 17 +++++++ 5 files changed, 228 insertions(+), 2 deletions(-) create mode 100644 lib/json_out.c create mode 100644 lib/json_out.h diff --git a/lib/Makefile.in b/lib/Makefile.in index 3be62a7..9975523 100644 --- a/lib/Makefile.in +++ b/lib/Makefile.in @@ -42,7 +42,7 @@ INCLS = -I. -I.. -I../libmisc CFLAGS = $(CCOPT) $(INCLS) $(DEFS) TARGETS = ipfix_reverse_fields.h ipfix_def_fokus.h ipfix_fields_fokus.h ipfix_def_netscaler.h ipfix_fields_netscaler.h ipfix_reverse_fields_netscaler.h libipfix.a libipfix.so -SOURCES = ipfix.c ipfix_col.c ipfix_col_db.c ipfix_col_files.c ipfix_print.c +SOURCES = ipfix.c ipfix_col.c ipfix_col_db.c ipfix_col_files.c ipfix_print.c json_out.c OBJECTS = $(SOURCES:.c=.o) @IPFIX_DB_OBJ@ @IPFIX_SSL_OBJ@ DHPARAMS = dh512.pem dh1024.pem CLEANFILES = $(TARGETS) *.d *.o *.so *.so.$(VERSION) @@ -64,7 +64,7 @@ dhparams.c: $(DHPARAMS) $(OPENSSL) dh -noout -C < dh1024.pem >> $@ -ipfix.c: ipfix_reverse_fields.h +ipfix.c: ipfix_reverse_fields.h json_out.h ipfix_reverse_fields.h: ipfix_fields.h make-reverse-IPFIX_FIELDS_H.sed-script-file sed -f make-reverse-IPFIX_FIELDS_H.sed-script-file $< > $@ diff --git a/lib/ipfix_col_db.c b/lib/ipfix_col_db.c index 50a6e88..2d560ff 100644 --- a/lib/ipfix_col_db.c +++ b/lib/ipfix_col_db.c @@ -24,13 +24,16 @@ #include #include #include +#include +#include "mlog.h" #include "misc.h" #include "ipfix.h" #include "ipfix_col.h" #ifdef DBSUPPORT #include "ipfix_db.h" #include "ipfix_col_db.h" +#include "json_out.h" #endif /*------ defines ---------------------------------------------------------*/ @@ -148,6 +151,117 @@ int ipfix_export_trecord_db( ipfixs_node_t *s, ipfixt_node_t *t, void *arg ) return 0; } +int ipfix_export_drecord_jsonfile( ipfixs_node_t *s, + ipfixt_node_t *t, + ipfix_datarecord_t *d, + void *arg ) +{ + ipfixe_data_db_t *data = (ipfixe_data_db_t*)arg; + char *func = "export_drecord_jsonfile"; + int i; + FILE *json_file = NULL; + + if ( !data->json_filename ) { + return -1; + } + + /* Write data set to a file as JSON. One JSON document per line. + */ + + json_file = fopen(data->json_filename, "a"); + if (json_file == NULL) { + mlogf( 0, "[%s] opening file '%s' for appending failed: %s\n", + func, data->json_filename, strerror(errno)); + } + + fprintf(json_file, "{\"ipfix_template_id\":\"%d\"", t->ipfixt->tid); + + /* TODO The first attribute should be the template number. 
+ */ + + for ( i=0; iipfixt->nfields; i++ ) { + if ( t->ipfixt->fields[i].elem->ft->eno == 0 + && t->ipfixt->fields[i].elem->ft->ftype == 0xD2 ) { + continue; /* D2 == 210, paddingOctets */ + } + + /* The attribute names come from trusted data, not from the protocol + */ + + fprintf(json_file, ", \"%s\":", t->ipfixt->fields[i].elem->ft->name); + + switch (t->ipfixt->fields[i].elem->ft->coding) { + case IPFIX_CODING_UINT: + switch (d->lens[i]) { + case 1: + fprintf(json_file, "%u", *((uint8_t *) (d->addrs[i])) ); + break; + case 2: + fprintf(json_file, "%u", *((uint16_t *) (d->addrs[i])) ); + break; + case 4: + fprintf(json_file, "%u", *((uint32_t *) (d->addrs[i])) ); + break; + case 8: + fprintf(json_file, "%"PRIu64, *((uint64_t *) (d->addrs[i])) ); + break; + default: + mlogf(1, "[%s] JSON emmission of type UINT (%d bytes) is NOT IMPLEMENTED (%s).\n", func, d->lens[i], t->ipfixt->fields[i].elem->ft->name); + fprintf(json_file, "null"); + } + break; + case IPFIX_CODING_INT: + switch (d->lens[i]) { + case 1: + fprintf(json_file, "%d", *((int8_t *) (d->addrs[i])) ); + break; + case 2: + fprintf(json_file, "%d", *((int16_t *) (d->addrs[i])) ); + break; + case 4: + fprintf(json_file, "%d", *((int32_t *) (d->addrs[i])) ); + break; + case 8: + fprintf(json_file, "%"PRId64, *((uint64_t *) (d->addrs[i])) ); + break; + default: + mlogf(1, "[%s] JSON emmission of type INT (%d bytes) is NOT IMPLEMENTED (%s).\n", func, d->lens[i], t->ipfixt->fields[i].elem->ft->name); + fprintf(json_file, "null"); + } + break; + case IPFIX_CODING_FLOAT: + mlogf(1, "[%s] JSON emmission of type FLOAT not complete yet (%s).\n", func, t->ipfixt->fields[i].elem->ft->name); + fprintf(json_file, "null"); + break; + case IPFIX_CODING_IPADDR: + mlogf(1, "[%s] JSON emmission of type IPADDR not complete yet (%s).\n", func, t->ipfixt->fields[i].elem->ft->name); + fprintf(json_file, "null"); + break; + case IPFIX_CODING_NTP: + mlogf(1, "[%s] JSON emmission of type NTP not complete yet (%s).\n", func, t->ipfixt->fields[i].elem->ft->name); + fprintf(json_file, "null"); + break; + case IPFIX_CODING_STRING: + // don't forget JSON is meant to be UTF-8; IPFIX/Netscaler is ....? 
+ json_render_string_to_FILE(json_file, (const char *) d->addrs[i], d->lens[i]); + break; + case IPFIX_CODING_BYTES: + json_render_bytes_as_hexpairs_to_FILE(json_file, d->addrs[i], d->lens[i]); + break; + default: + mlogf(1, "[%s] JSON emmission of type %d not currently supported (%s).\n", + func, t->ipfixt->fields[i].elem->ft->coding, t->ipfixt->fields[i].elem->ft->name); + fprintf(json_file, "null"); + } + } + + fprintf(json_file, "}\n"); + + if (json_file != NULL) { + fclose(json_file); + } + return 0; +} int ipfix_export_drecord_db( ipfixs_node_t *s, ipfixt_node_t *t, ipfix_datarecord_t *d, @@ -288,6 +402,9 @@ int ipfix_col_init_mysqlexport( char *dbhost, char *dbuser, g_colinfo->export_newmsg = ipfix_export_newmsg_db; g_colinfo->export_trecord = ipfix_export_trecord_db; g_colinfo->export_drecord = ipfix_export_drecord_db; + if (opt_jsonfile != NULL) { + g_colinfo->export_drecord = ipfix_export_drecord_jsonfile; + } g_colinfo->export_cleanup = ipfix_export_cleanup_db; g_colinfo->data = data; diff --git a/lib/ipfix_col_db.h b/lib/ipfix_col_db.h index 59be923..202c6bb 100644 --- a/lib/ipfix_col_db.h +++ b/lib/ipfix_col_db.h @@ -21,6 +21,8 @@ int ipfix_export_newmsg_db( ipfixs_node_t *s, ipfix_hdr_t *hdr, void *arg ); int ipfix_export_trecord_db( ipfixs_node_t *s, ipfixt_node_t *t, void *arg ); int ipfix_export_drecord_db( ipfixs_node_t *s, ipfixt_node_t *t, ipfix_datarecord_t *d, void *arg ); +int ipfix_export_drecords_jsonfile( ipfixs_node_t *s, ipfixt_node_t *t, + ipfix_datarecord_t *d, void *arg ); void ipfix_export_cleanup_db( void *arg ); int ipfix_export_init_db( char *dbhost, char *dbuser, char *dbpw, char *dbname, diff --git a/lib/json_out.c b/lib/json_out.c new file mode 100644 index 0000000..34cb77f --- /dev/null +++ b/lib/json_out.c @@ -0,0 +1,90 @@ +#include +#include + +/* len includes the trailing NUL byte (like snprintf) */ + +void json_render_string_to_FILE(FILE *out, const char *s /* utf-8 */, int len) +{ + /* See json.org for the spec. The real spec gives some further instruction + * with regard to codepoints above the Unicode BMP. + * http://www.ecma-international.org/publications/files/ECMA-ST/ECMA-404.pdf */ + + int offset; + + if (len > 0) len -= 1; /* Should never be less, but we wouldn't want it to wrap around */ + + fputc('"', out); + + for (offset = 0; offset < len; offset++) + { + switch(s[offset]) + { + case '"': + fprintf(out, "\\\""); + break; + case '\\': + fprintf(out, "\\\\"); + break; + case '/': + fprintf(out, "\\/"); + break; + case '\b': + fprintf(out, "\\b"); + break; + case '\f': /* really!? but not bell, not vertical tab and not NUL */ + fprintf(out, "\\f"); + break; + case '\n': + fprintf(out, "\\n"); + break; + case '\r': + fprintf(out, "\\r"); + break; + case '\t': + fprintf(out, "\\t"); + break; + default: + if(s[offset] < ' ') /* ie. control character, in ASCII */ + { + fprintf(out, "\\u%04X", s[offset]); + } + else if ( (s[offset] >= ' ') && (s[offset] < 127) ) + { + fputc(s[offset], out); + } + else if ( ((192 <= s[offset]) && (s[offset] <= 193)) + || ((245 <= s[offset]) && (s[offset] <= 255)) ) + { + /* Illegal UTF-8 bytes. Replace with REPLACEMENT CHARACTER */ + fprintf(out, "\\uFFFD"); + } + else + { + fputc(s[offset], out); + } + } + } + + fputc('"', out); +} + +/* JSON doesn't have a data representation for arbitrary bytes, so I'll use a hexdump sort + * of notation like XX XX XX XX YY YY YY YY ZZ ZZ ZZ ZZ with a space between each + * byte and a double-space between each 32-bit word. 
*/ + +void json_render_bytes_as_hexpairs_to_FILE(FILE *json_file, const void *s /* bytes */, int len) +{ + const uint8_t *b = s; + int offset; + + fputc('"', json_file); + + for (offset = 0; offset < len; offset++) + { + fprintf(json_file, "%02X%s", b[offset], (offset==0)?"":" "); + if ((offset % 4) == 3) fputc(' ', json_file); + } + + fputc('"', json_file); +} + diff --git a/lib/json_out.h b/lib/json_out.h new file mode 100644 index 0000000..07fe084 --- /dev/null +++ b/lib/json_out.h @@ -0,0 +1,17 @@ +#ifndef __JSON_OUT_H +# define __JSON_OUT_H + +#include + +/* Assumes that the input is UTF-8 */ + +extern void json_render_string_to_FILE(FILE *out, const char *s, int len); + +/* JSON doesn't have a data representation for arbitrary bytes, so I'll use a hexdump sort + * of notation like XX XX XX XX YY YY YY YY ZZ ZZ ZZ ZZ with a space between each + * byte and a double-space between each 32-bit word. */ + +extern void json_render_bytes_as_hexpairs_to_FILE(FILE *out, const void *s /* bytes */, int len); + +#endif /* __JSON_OUT_H */ + From 26b49e8c2be7c952c2631d36993bc3dc2fa577b6 Mon Sep 17 00:00:00 2001 From: Cameron Kerr Date: Wed, 18 Feb 2015 23:48:36 +1300 Subject: [PATCH 17/48] IP addresses supported in JSON output. --- lib/ipfix.h | 2 +- lib/ipfix_col_db.c | 11 +++++++++-- 2 files changed, 10 insertions(+), 3 deletions(-) diff --git a/lib/ipfix.h b/lib/ipfix.h index 310edcc..b1450b2 100644 --- a/lib/ipfix.h +++ b/lib/ipfix.h @@ -237,7 +237,7 @@ ipfix_field_t *ipfix_get_ftinfo( int eno, int ftid ); int ipfix_get_eno_ieid( char *field, int *eno, int *ieid ); ipfix_field_t *ipfix_create_unknown_ftinfo( int eno, int ftid ); void ipfix_free_unknown_ftinfo( ipfix_field_t *f ); - +extern int ipfix_snprint_ipaddr( char *str, size_t size, void *data, size_t len ); /** common funcs */ diff --git a/lib/ipfix_col_db.c b/lib/ipfix_col_db.c index 2d560ff..64a7b35 100644 --- a/lib/ipfix_col_db.c +++ b/lib/ipfix_col_db.c @@ -25,6 +25,7 @@ #include #include #include +#include #include "mlog.h" #include "misc.h" @@ -234,8 +235,14 @@ int ipfix_export_drecord_jsonfile( ipfixs_node_t *s, fprintf(json_file, "null"); break; case IPFIX_CODING_IPADDR: - mlogf(1, "[%s] JSON emmission of type IPADDR not complete yet (%s).\n", func, t->ipfixt->fields[i].elem->ft->name); - fprintf(json_file, "null"); + { + char addrbuf[INET6_ADDRSTRLEN]; + + ipfix_snprint_ipaddr(addrbuf, INET6_ADDRSTRLEN, d->addrs[i], d->lens[i]); + + mlogf(1, "[%s] JSON emmission of type IPADDR not complete yet (%s).\n", func, t->ipfixt->fields[i].elem->ft->name); + fprintf(json_file, "\"%s\"", addrbuf); + } break; case IPFIX_CODING_NTP: mlogf(1, "[%s] JSON emmission of type NTP not complete yet (%s).\n", func, t->ipfixt->fields[i].elem->ft->name); From d22a06469e3feb41d405e26043bafbf5f151638e Mon Sep 17 00:00:00 2001 From: Cameron Kerr Date: Thu, 19 Feb 2015 01:20:57 +1300 Subject: [PATCH 18/48] NTP timestamps are now supported (as RFC3306) for JSON output --- lib/ipfix_col_db.c | 4 +--- lib/json_out.c | 46 ++++++++++++++++++++++++++++++++++++++++++++++ lib/json_out.h | 4 ++++ 3 files changed, 51 insertions(+), 3 deletions(-) diff --git a/lib/ipfix_col_db.c b/lib/ipfix_col_db.c index 64a7b35..b5ea801 100644 --- a/lib/ipfix_col_db.c +++ b/lib/ipfix_col_db.c @@ -240,13 +240,11 @@ int ipfix_export_drecord_jsonfile( ipfixs_node_t *s, ipfix_snprint_ipaddr(addrbuf, INET6_ADDRSTRLEN, d->addrs[i], d->lens[i]); - mlogf(1, "[%s] JSON emmission of type IPADDR not complete yet (%s).\n", func, t->ipfixt->fields[i].elem->ft->name); fprintf(json_file, 
"\"%s\"", addrbuf); } break; case IPFIX_CODING_NTP: - mlogf(1, "[%s] JSON emmission of type NTP not complete yet (%s).\n", func, t->ipfixt->fields[i].elem->ft->name); - fprintf(json_file, "null"); + json_render_NTP_timestamp_to_FILE(json_file, d->addrs[i], d->lens[i]); break; case IPFIX_CODING_STRING: // don't forget JSON is meant to be UTF-8; IPFIX/Netscaler is ....? diff --git a/lib/json_out.c b/lib/json_out.c index 34cb77f..ec00a27 100644 --- a/lib/json_out.c +++ b/lib/json_out.c @@ -1,4 +1,6 @@ #include +#include +#include #include /* len includes the trailing NUL byte (like snprintf) */ @@ -88,3 +90,47 @@ void json_render_bytes_as_hexpairs_to_FILE(FILE *json_file, const void *s /* byt fputc('"', json_file); } +/* JSON doesn't have a format for date/time representation, but ElasticSearch does, + * so we'll use that (RFC3306) + * + * Credit for the NTP conversion: Willy Kuo + * http://waitingkuo.blogspot.co.nz/2012/06/conversion-between-ntp-time-and-unix.html + */ + +struct ntp_time_t { + uint32_t seconds; + uint32_t fraction; +}; + +// Okay.... somewhere 'unix' is being defined as a token and leads to a syntax error +// So I renamed it unixtime for now. +// +static void convert_ntp_time_into_unix_time(const struct ntp_time_t *ntp, struct timeval *unixtime) +{ + unixtime->tv_sec = ntp->seconds - 0x83AA7E80; /* the seconds from Jan 1, 1900 to Jan 1, 1970 */ + unixtime->tv_usec = (uint32_t)( (double)ntp->fraction * 1.0e6 / (double)(1LL<<32) ); + return; +} + +// ElasticSearch would also take milliseconds from the Epoch... but I'd prefer +// something more general. +// +void json_render_NTP_timestamp_to_FILE(FILE *json_file, const void *addr, int len) +{ + struct ntp_time_t ntp; + struct timeval unixtime; + struct tm unixtime_utc_tm; + char datetimebuf[25]; + + ntp.seconds = (*((uint64_t *)addr)) >> 32; + ntp.fraction = (*((uint64_t *)addr)) & 0xFFFFFFFF; + + convert_ntp_time_into_unix_time(&ntp, &unixtime); + + gmtime_r(&unixtime.tv_sec, &unixtime_utc_tm); + + strftime(datetimebuf, sizeof datetimebuf, "%FT%T", &unixtime_utc_tm); + + fprintf(json_file, "\"%s.%03ldZ\"", datetimebuf, unixtime.tv_usec / 1000); +} + diff --git a/lib/json_out.h b/lib/json_out.h index 07fe084..9922f91 100644 --- a/lib/json_out.h +++ b/lib/json_out.h @@ -13,5 +13,9 @@ extern void json_render_string_to_FILE(FILE *out, const char *s, int len); extern void json_render_bytes_as_hexpairs_to_FILE(FILE *out, const void *s /* bytes */, int len); +/* eg. 
2015-02-19T01:15:58.938Z */ + +extern void json_render_NTP_timestamp_to_FILE(FILE *json_file, const void *addr, int len); + #endif /* __JSON_OUT_H */ From bff9bfd7dacd312a651c41f0dead95c76d5f2d29 Mon Sep 17 00:00:00 2001 From: Cameron Kerr Date: Thu, 19 Feb 2015 01:54:37 +1300 Subject: [PATCH 19/48] Fixed bugs and imperfections in spacing for JSON BYTES --- lib/json_out.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/lib/json_out.c b/lib/json_out.c index ec00a27..c2a9fb6 100644 --- a/lib/json_out.c +++ b/lib/json_out.c @@ -83,8 +83,8 @@ void json_render_bytes_as_hexpairs_to_FILE(FILE *json_file, const void *s /* byt for (offset = 0; offset < len; offset++) { - fprintf(json_file, "%02X%s", b[offset], (offset==0)?"":" "); - if ((offset % 4) == 3) fputc(' ', json_file); + fprintf(json_file, "%s%02X", (offset==0)?"":" ", b[offset]); + if (((offset % 4) == 3) && (offset < len-1)) fputc(' ', json_file); } fputc('"', json_file); From dd317d18fb2fb2cbb28a2cd4c51e2aa73b1305dc Mon Sep 17 00:00:00 2001 From: Cameron Kerr Date: Thu, 19 Feb 2015 02:05:16 +1300 Subject: [PATCH 20/48] Resolve #10 Allow --jsonfile to be given '-' as an argument to mean stdout --- lib/ipfix_col_db.c | 19 +++++++++++++------ 1 file changed, 13 insertions(+), 6 deletions(-) diff --git a/lib/ipfix_col_db.c b/lib/ipfix_col_db.c index b5ea801..e1046b0 100644 --- a/lib/ipfix_col_db.c +++ b/lib/ipfix_col_db.c @@ -169,11 +169,15 @@ int ipfix_export_drecord_jsonfile( ipfixs_node_t *s, /* Write data set to a file as JSON. One JSON document per line. */ - json_file = fopen(data->json_filename, "a"); - if (json_file == NULL) { - mlogf( 0, "[%s] opening file '%s' for appending failed: %s\n", - func, data->json_filename, strerror(errno)); - } + if (strcmp(data->json_filename, "-") == 0) { + json_file = stdout; + } else { + json_file = fopen(data->json_filename, "a"); + if (json_file == NULL) { + mlogf( 0, "[%s] opening file '%s' for appending failed: %s\n", + func, data->json_filename, strerror(errno)); + } + } fprintf(json_file, "{\"ipfix_template_id\":\"%d\"", t->ipfixt->tid); @@ -262,7 +266,10 @@ int ipfix_export_drecord_jsonfile( ipfixs_node_t *s, fprintf(json_file, "}\n"); - if (json_file != NULL) { + if (json_file == stdout) { + fflush(stdout); /* stdout is by default fully-buffered when not to a terminal */ + } + else if (json_file != NULL) { fclose(json_file); } return 0; From a038b206f9a5a61a9abc4f01949e0a58bf41f8e7 Mon Sep 17 00:00:00 2001 From: Luca Boccassi Date: Wed, 18 Feb 2015 19:13:14 +0000 Subject: [PATCH 21/48] Support for pkg-config --- Makefile.in | 14 ++++++++++++-- configure | 3 ++- configure.ac | 3 ++- libipfix.pc.in | 12 ++++++++++++ 4 files changed, 28 insertions(+), 4 deletions(-) create mode 100644 libipfix.pc.in diff --git a/Makefile.in b/Makefile.in index 6357a2a..99e1efa 100644 --- a/Makefile.in +++ b/Makefile.in @@ -9,6 +9,9 @@ prefix = @prefix@ SUBDIRS = lib libmisc examples collector probe +pkgconfigdir = @libdir@/pkgconfig +INST_PKGCONFIG = libipfix.pc + .PHONY: subdirs $(SUBDIRS) all: subdirs @@ -25,10 +28,17 @@ examples collector probe: lib libmisc $(SUBDIRS): $(MAKE) -C $@ $(filter-out $(SUBDIRS),$(MAKECMDGOALS)) -clean install uninstall: subdirs +install: subdirs + @[ -d $(DESTDIR)/${pkgconfigdir} ] || (mkdir -p $(DESTDIR)/${pkgconfigdir}; chmod 755 $(DESTDIR)/${pkgconfigdir}) + cp $(INST_PKGCONFIG) $(DESTDIR)/${pkgconfigdir}/ + +clean: subdirs + +uninstall: subdirs + rm -f $(addprefix $(DESTDIR)${pkgconfigdir}/, $(INST_PKGCONFIG)) distclean: subdirs - rm -rf config.status 
config.log Makefile + rm -rf config.status config.log Makefile config.h libipfix.pc # build binary package # to build signed package remove -us -uc diff --git a/configure b/configure index 408fb83..2f30a13 100755 --- a/configure +++ b/configure @@ -4781,7 +4781,7 @@ fi done -ac_config_files="$ac_config_files Makefile lib/Makefile libmisc/Makefile probe/Makefile examples/Makefile collector/Makefile" +ac_config_files="$ac_config_files Makefile lib/Makefile libmisc/Makefile probe/Makefile examples/Makefile collector/Makefile libipfix.pc" cat >confcache <<\_ACEOF # This file is a shell script that caches the results of configure @@ -5473,6 +5473,7 @@ do "probe/Makefile") CONFIG_FILES="$CONFIG_FILES probe/Makefile" ;; "examples/Makefile") CONFIG_FILES="$CONFIG_FILES examples/Makefile" ;; "collector/Makefile") CONFIG_FILES="$CONFIG_FILES collector/Makefile" ;; + "libipfix.pc") CONFIG_FILES="$CONFIG_FILES libipfix.pc" ;; *) as_fn_error "invalid argument: \`$ac_config_target'" "$LINENO" 5;; esac diff --git a/configure.ac b/configure.ac index 5984cde..0b357bc 100644 --- a/configure.ac +++ b/configure.ac @@ -232,5 +232,6 @@ AC_CONFIG_FILES([Makefile \ libmisc/Makefile \ probe/Makefile \ examples/Makefile \ - collector/Makefile ]) + collector/Makefile \ + libipfix.pc ]) AC_OUTPUT diff --git a/libipfix.pc.in b/libipfix.pc.in new file mode 100644 index 0000000..bf4695f --- /dev/null +++ b/libipfix.pc.in @@ -0,0 +1,12 @@ +prefix=@prefix@ +exec_prefix=@exec_prefix@ +libdir=@libdir@ +includedir=@includedir@ + +Name: libipfix +Description: A C-Library for the IPFIX protocol +URL: http://www.ip-measurement.org/libipfix +Version: @PACKAGE_VERSION@ +Requires: +Libs: -L${libdir} -lipfix @LDFLAGS@ @LIBS@ +Cflags: -I${includedir} From 05bcbc4917dda4fd3e210f3efe0b6341346d593f Mon Sep 17 00:00:00 2001 From: Cameron Kerr Date: Thu, 19 Feb 2015 12:35:47 +1300 Subject: [PATCH 22/48] Ignore some files used/generated in testing, and pkgconfig artifacts --- .gitignore | 3 +++ 1 file changed, 3 insertions(+) diff --git a/.gitignore b/.gitignore index f0ed3b6..78ec83b 100644 --- a/.gitignore +++ b/.gitignore @@ -16,3 +16,6 @@ cscope.* 10.*.*.* .depend config.log +libipfix.pc +netscaler_ipfix.pw +data.json From 9cc4f606979b9045a459e5b7fcab7f9257b4c913 Mon Sep 17 00:00:00 2001 From: Cameron Kerr Date: Thu, 19 Feb 2015 12:36:36 +1300 Subject: [PATCH 23/48] Resolve #4 Get database credentials from secure file instead of command-line --- collector/collector.c | 133 +++++++++++++++++++++++++++++------------- 1 file changed, 93 insertions(+), 40 deletions(-) diff --git a/collector/collector.c b/collector/collector.c index ff8f451..c4951c6 100644 --- a/collector/collector.c +++ b/collector/collector.c @@ -68,6 +68,7 @@ typedef struct ipfix_collector_opts int dbexport; /* flag */ char *dbuser; /* db username */ char *dbpw; /* db password */ + char *dbpw_filename; /* db password from file */ char *dbname; /* db name */ char *dbhost; /* hostname */ char *jsonfile; /* filename */ @@ -104,34 +105,35 @@ static void usage( char *taskname) const char helptxt[] = "[options]\n\n" "options:\n" - " -h this help\n" - " -4 accept connections via AF_INET socket\n" - " -6 accept connections via AF_INET6 socket\n" - " -o store files of collected data in this dir\n" - " -p listen on this port (default=4739)\n" - " -s support SCTP clients\n" - " -t support TCP clients\n" - " -u support UDP clients\n" - " -v increase verbose level\n" + " -h this help\n" + " -4 accept connections via AF_INET socket\n" + " -6 accept connections via AF_INET6 socket\n" 
+ " -o store files of collected data in this dir\n" + " -p listen on this port (default=4739)\n" + " -s support SCTP clients\n" + " -t support TCP clients\n" + " -u support UDP clients\n" + " -v increase verbose level\n" #ifdef DBSUPPORT #ifdef HAVE_GETOPT_LONG "db options:\n" - " --db export into database\n" - " --dbhost db host\n" - " --dbname db name\n" - " --dbuser db user\n" - " --dbpw db password\n" - " --jsonfile use db only for templates; send data as JSON lines\n" + " --db export into database\n" + " --dbhost db host\n" + " --dbname db name\n" + " --dbuser db user\n" + " --dbpw db password\n" + " --dbpw-filename db password from first line of file\n" + " --jsonfile templates to db; data as JSON lines\n" #else - " -d export into database\n" + " -d export into database\n" #endif #ifdef SSLSUPPORT "ssl options:\n" - " --ssl expect tls/ssl clients\n" - " --key private key file to use\n" - " --cert certificate file to use\n" - " --cafile file of CAs\n" - " --cadir directory of CAs\n" + " --ssl expect tls/ssl clients\n" + " --key private key file to use\n" + " --cert certificate file to use\n" + " --cafile file of CAs\n" + " --cadir directory of CAs\n" #endif #endif "\n"; @@ -287,6 +289,40 @@ int do_collect() return retval; } +static int read_password_from_file(const char *dbpw_filename, char **dbpw) +{ + /* FIXME: note that this will leak memory because dbpw in the caller is + * a pointer to constant string (Text area), but if we set it here + * it is a pointer to heap memory, so the caller wouldn't readily know + * if it should be freed or not. In reality, this shouldn't be much of + * an issue as it the password will remain in memory for the duration + * of the program (which is in itself less ideal). */ + + FILE *dbpw_file; + char password[50]; + int retval; + + dbpw_file = fopen(dbpw_filename, "r"); + if (dbpw_file == NULL) { + return -1; + } + + if (fgets(password, sizeof password, dbpw_file) == NULL) { + password[0] = '\0'; + *dbpw = NULL; + retval = -1; + } else { + if (password[strlen(password)-1] == '\n') { + password[strlen(password)-1] = '\0'; + } + *dbpw = strdup(password); + retval = 0; + } + + fclose(dbpw_file); + return retval; +} + int main (int argc, char *argv[]) { char arg; /* short options: character */ @@ -306,31 +342,33 @@ int main (int argc, char *argv[]) { "cadir", 1, 0, 0}, { "help", 0, 0, 0}, { "jsonfile", 1, 0, 0}, + { "dbpw-filename", 1, 0, 0}, { 0, 0, 0, 0 } }; #endif /** set default options */ - par.tcp = 0; - par.udp = 0; - par.sctp = 0; - par.ssl = 0; - par.cafile = CAFILE; - par.cadir = CADIR; - par.keyfile = KEYFILE; - par.certfile= CERTFILE; - par.port = 0; - par.family = AF_UNSPEC; - par.logfile = NULL; - par.maxcon = 10; - par.datadir = NULL; - par.dbexport = 0; - par.dbhost = DFLT_MYSQL_HOST; - par.dbname = DFLT_MYSQL_DBNAME; - par.dbuser = DFLT_MYSQL_USER; - par.dbpw = DFLT_MYSQL_PASSWORD; - par.jsonfile = NULL; + par.tcp = 0; + par.udp = 0; + par.sctp = 0; + par.ssl = 0; + par.cafile = CAFILE; + par.cadir = CADIR; + par.keyfile = KEYFILE; + par.certfile = CERTFILE; + par.port = 0; + par.family = AF_UNSPEC; + par.logfile = NULL; + par.maxcon = 10; + par.datadir = NULL; + par.dbexport = 0; + par.dbhost = DFLT_MYSQL_HOST; + par.dbname = DFLT_MYSQL_DBNAME; + par.dbuser = DFLT_MYSQL_USER; + par.dbpw = DFLT_MYSQL_PASSWORD; + par.dbpw_filename = NULL; + par.jsonfile = NULL; snprintf( par.progname, sizeof(par.progname), "%s", basename( argv[0]) ); @@ -382,6 +420,9 @@ int main (int argc, char *argv[]) case 11: /* jsonfile */ par.jsonfile = optarg; break; 
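/* Editor's sketch (not part of the patch): one possible answer to the
 * ownership question raised in the FIXME inside read_password_from_file()
 * above. A hypothetical flag records whether par.dbpw was strdup()'d from the
 * password file, so the exit path could scrub and free it rather than leaving
 * the heap copy around for the life of the process. Assumes the usual
 * <stdlib.h>/<string.h> includes that collector.c already relies on for
 * strdup()/strcmp(); names here are illustrative only. */
static int dbpw_is_heap = 0;                 /* set after a successful file read */

static void release_dbpw(char **dbpw)
{
    if (dbpw_is_heap && *dbpw != NULL) {
        memset(*dbpw, 0, strlen(*dbpw));     /* overwrite the password bytes */
        free(*dbpw);
        *dbpw = NULL;
        dbpw_is_heap = 0;
    }
}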
+ case 12: /* dbpw-filename */ + par.dbpw_filename = optarg; + break; } break; @@ -450,6 +491,18 @@ int main (int argc, char *argv[]) fprintf( stderr, "info: message dump, no data storage.\n" ); fflush( stderr ); } + if ( par.dbexport ) { + if ( (strcmp(par.dbpw, DFLT_MYSQL_PASSWORD) != 0 && par.dbpw_filename != NULL) ) { + fprintf( stderr, "error: don't specify both --dbpw and --dbpw-filename.\n" ); + exit(1); + } else if (par.dbpw_filename != NULL ) { + if (read_password_from_file(par.dbpw_filename, &par.dbpw) < 0) { + fprintf( stderr, "error: could not read database password from file '%s': %s\n", + par.dbpw_filename, strerror(errno) ); + exit(1); + } + } + } if ( par.port==0 ) { par.port = par.ssl?IPFIX_TLS_PORTNO:IPFIX_PORTNO; From 18f65e255356472341427e7ad1f1d06de4c05784 Mon Sep 17 00:00:00 2001 From: Cameron Kerr Date: Thu, 19 Feb 2015 14:13:42 +1300 Subject: [PATCH 24/48] Resolve #11 Listen for SIGHUP to reopen JSON datafile REFACTOR REQUIRED Needed to refactor the codebase slightly to make the necessary global datastructures visible to the relevant code. --- collector/collector.c | 10 ++++++ lib/ipfix.h | 1 + lib/ipfix_col.c | 11 ++++++ lib/ipfix_col.h | 7 ++++ lib/ipfix_col_db.c | 82 ++++++++++++++++++++++++++----------------- lib/ipfix_col_db.h | 1 + lib/ipfix_col_files.c | 2 -- lib/ipfix_print.c | 1 - 8 files changed, 79 insertions(+), 36 deletions(-) diff --git a/collector/collector.c b/collector/collector.c index c4951c6..60f2060 100644 --- a/collector/collector.c +++ b/collector/collector.c @@ -191,6 +191,14 @@ void sig_func( int signo ) exit_func( 1 ); } +void sig_hup( int signo ) +{ + if ( verbose_level ) + fprintf( stderr, "\n[%s] got SIGHUP, reopening JSON file if opened\n", par.progname ); + + ipfix_col_reload(); +} + int do_collect() { int i, retval = -1; @@ -553,6 +561,8 @@ int main (int argc, char *argv[]) signal( SIGTERM, sig_func ); signal( SIGINT, sig_func ); + signal( SIGHUP, sig_hup ); + /** do the work */ if ( do_collect() <0 ) diff --git a/lib/ipfix.h b/lib/ipfix.h index b1450b2..43ffd66 100644 --- a/lib/ipfix.h +++ b/lib/ipfix.h @@ -243,6 +243,7 @@ extern int ipfix_snprint_ipaddr( char *str, size_t size, void *data, size_t len */ int ipfix_init( void ); int ipfix_add_vendor_information_elements( ipfix_field_type_t *fields ); +int ipfix_reload( void ); void ipfix_cleanup( void ); #ifdef __cplusplus diff --git a/lib/ipfix_col.c b/lib/ipfix_col.c index 846bdfb..879a869 100644 --- a/lib/ipfix_col.c +++ b/lib/ipfix_col.c @@ -121,6 +121,8 @@ ipfixe_node_t *g_exporter = NULL; /* list of exporters */ ipfixs_node_t *udp_sources = NULL; /* list of sources */ mptimer_t g_mt; /* timer */ +ipfix_col_info_t *g_colinfo =NULL; + #ifdef SCTPSUPPORT sctp_assoc_node_t *sctp_assocs = NULL; /* sctp associations */ #endif @@ -2013,6 +2015,15 @@ int ipfix_get_template_ident( ipfix_template_t *t, return 0; } + +void ipfix_col_reload( void ) +{ +#ifdef DBSUPPORT + ipfix_col_db_reload(); +#endif +} + + /* * name: ipfix_col_cleanup() * parameters: none diff --git a/lib/ipfix_col.h b/lib/ipfix_col.h index 767448f..480f2cc 100644 --- a/lib/ipfix_col.h +++ b/lib/ipfix_col.h @@ -97,6 +97,8 @@ typedef struct ipfix_col_info void *data; } ipfix_col_info_t; +extern ipfix_col_info_t *g_colinfo; + typedef struct ipfix_col_info_node { struct ipfix_col_info_node *next; @@ -119,6 +121,7 @@ int ipfix_col_start_msglog( FILE *fpout ); void ipfix_col_stop_msglog( void ); int ipfix_col_close( int fd ); void ipfix_col_cleanup( void ); +void ipfix_col_reload( void ); /* internal, experimental */ int 
ipfix_parse_hdr( const uint8_t *buf, size_t buflen, ipfix_hdr_t *hdr ); @@ -135,6 +138,10 @@ int ipfix_col_close_ssl( ipfix_col_t *handle ); const char *ipfix_col_input_get_ident( ipfix_input_t *input ); +#ifdef DBSUPPORT +# include +#endif + #ifdef __cplusplus } #endif diff --git a/lib/ipfix_col_db.c b/lib/ipfix_col_db.c index e1046b0..cb42a8e 100644 --- a/lib/ipfix_col_db.c +++ b/lib/ipfix_col_db.c @@ -48,13 +48,12 @@ typedef struct ipfix_export_data_db { MYSQL *mysql; char *json_filename; + FILE *json_file; } ipfixe_data_db_t; #endif /*------ globals ---------------------------------------------------------*/ -static ipfix_col_info_t *g_colinfo =NULL; - /*----- revision id ------------------------------------------------------*/ static const char cvsid[]="$Id: ipfix_col_db.c 96 2009-03-27 19:19:27Z csc $"; @@ -160,7 +159,6 @@ int ipfix_export_drecord_jsonfile( ipfixs_node_t *s, ipfixe_data_db_t *data = (ipfixe_data_db_t*)arg; char *func = "export_drecord_jsonfile"; int i; - FILE *json_file = NULL; if ( !data->json_filename ) { return -1; @@ -170,16 +168,18 @@ int ipfix_export_drecord_jsonfile( ipfixs_node_t *s, */ if (strcmp(data->json_filename, "-") == 0) { - json_file = stdout; + data->json_file = stdout; } else { - json_file = fopen(data->json_filename, "a"); - if (json_file == NULL) { - mlogf( 0, "[%s] opening file '%s' for appending failed: %s\n", - func, data->json_filename, strerror(errno)); - } + if ( data->json_filename && data->json_file == NULL ) { + data->json_file = fopen(data->json_filename, "a"); + if (data->json_file == NULL) { + mlogf( 0, "[%s] opening file '%s' for appending failed: %s\n", + func, data->json_filename, strerror(errno)); + } + } } - fprintf(json_file, "{\"ipfix_template_id\":\"%d\"", t->ipfixt->tid); + fprintf(data->json_file, "{\"ipfix_template_id\":\"%d\"", t->ipfixt->tid); /* TODO The first attribute should be the template number. 
*/ @@ -193,50 +193,50 @@ int ipfix_export_drecord_jsonfile( ipfixs_node_t *s, /* The attribute names come from trusted data, not from the protocol */ - fprintf(json_file, ", \"%s\":", t->ipfixt->fields[i].elem->ft->name); + fprintf(data->json_file, ", \"%s\":", t->ipfixt->fields[i].elem->ft->name); switch (t->ipfixt->fields[i].elem->ft->coding) { case IPFIX_CODING_UINT: switch (d->lens[i]) { case 1: - fprintf(json_file, "%u", *((uint8_t *) (d->addrs[i])) ); + fprintf(data->json_file, "%u", *((uint8_t *) (d->addrs[i])) ); break; case 2: - fprintf(json_file, "%u", *((uint16_t *) (d->addrs[i])) ); + fprintf(data->json_file, "%u", *((uint16_t *) (d->addrs[i])) ); break; case 4: - fprintf(json_file, "%u", *((uint32_t *) (d->addrs[i])) ); + fprintf(data->json_file, "%u", *((uint32_t *) (d->addrs[i])) ); break; case 8: - fprintf(json_file, "%"PRIu64, *((uint64_t *) (d->addrs[i])) ); + fprintf(data->json_file, "%"PRIu64, *((uint64_t *) (d->addrs[i])) ); break; default: mlogf(1, "[%s] JSON emmission of type UINT (%d bytes) is NOT IMPLEMENTED (%s).\n", func, d->lens[i], t->ipfixt->fields[i].elem->ft->name); - fprintf(json_file, "null"); + fprintf(data->json_file, "null"); } break; case IPFIX_CODING_INT: switch (d->lens[i]) { case 1: - fprintf(json_file, "%d", *((int8_t *) (d->addrs[i])) ); + fprintf(data->json_file, "%d", *((int8_t *) (d->addrs[i])) ); break; case 2: - fprintf(json_file, "%d", *((int16_t *) (d->addrs[i])) ); + fprintf(data->json_file, "%d", *((int16_t *) (d->addrs[i])) ); break; case 4: - fprintf(json_file, "%d", *((int32_t *) (d->addrs[i])) ); + fprintf(data->json_file, "%d", *((int32_t *) (d->addrs[i])) ); break; case 8: - fprintf(json_file, "%"PRId64, *((uint64_t *) (d->addrs[i])) ); + fprintf(data->json_file, "%"PRId64, *((uint64_t *) (d->addrs[i])) ); break; default: mlogf(1, "[%s] JSON emmission of type INT (%d bytes) is NOT IMPLEMENTED (%s).\n", func, d->lens[i], t->ipfixt->fields[i].elem->ft->name); - fprintf(json_file, "null"); + fprintf(data->json_file, "null"); } break; case IPFIX_CODING_FLOAT: mlogf(1, "[%s] JSON emmission of type FLOAT not complete yet (%s).\n", func, t->ipfixt->fields[i].elem->ft->name); - fprintf(json_file, "null"); + fprintf(data->json_file, "null"); break; case IPFIX_CODING_IPADDR: { @@ -244,36 +244,34 @@ int ipfix_export_drecord_jsonfile( ipfixs_node_t *s, ipfix_snprint_ipaddr(addrbuf, INET6_ADDRSTRLEN, d->addrs[i], d->lens[i]); - fprintf(json_file, "\"%s\"", addrbuf); + fprintf(data->json_file, "\"%s\"", addrbuf); } break; case IPFIX_CODING_NTP: - json_render_NTP_timestamp_to_FILE(json_file, d->addrs[i], d->lens[i]); + json_render_NTP_timestamp_to_FILE(data->json_file, d->addrs[i], d->lens[i]); break; case IPFIX_CODING_STRING: // don't forget JSON is meant to be UTF-8; IPFIX/Netscaler is ....? 
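/* Editor's illustration (not the project's json_render_string_to_FILE, whose
 * real implementation lives elsewhere in the tree): a minimal escaper showing
 * the quoting a JSON-lines emitter needs for the call that follows -- quotes,
 * backslashes and control bytes escaped, everything else copied through.
 * Passing the remaining bytes through unchanged is only safe if the input
 * really is UTF-8, which is exactly the open question in the comment above.
 * Assumes the usual <stdio.h> include already present in ipfix_col_db.c. */
static void jsonlines_escape_string(FILE *fp, const char *s, size_t len)
{
    size_t i;

    fputc('"', fp);
    for (i = 0; i < len; i++) {
        unsigned char c = (unsigned char) s[i];

        if (c == '"' || c == '\\')
            fprintf(fp, "\\%c", c);                    /* escape quote and backslash */
        else if (c < 0x20)
            fprintf(fp, "\\u%04x", (unsigned int) c);  /* escape control characters */
        else
            fputc(c, fp);                              /* pass printable/UTF-8 bytes through */
    }
    fputc('"', fp);
}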
- json_render_string_to_FILE(json_file, (const char *) d->addrs[i], d->lens[i]); + json_render_string_to_FILE(data->json_file, (const char *) d->addrs[i], d->lens[i]); break; case IPFIX_CODING_BYTES: - json_render_bytes_as_hexpairs_to_FILE(json_file, d->addrs[i], d->lens[i]); + json_render_bytes_as_hexpairs_to_FILE(data->json_file, d->addrs[i], d->lens[i]); break; default: mlogf(1, "[%s] JSON emmission of type %d not currently supported (%s).\n", func, t->ipfixt->fields[i].elem->ft->coding, t->ipfixt->fields[i].elem->ft->name); - fprintf(json_file, "null"); + fprintf(data->json_file, "null"); } } - fprintf(json_file, "}\n"); + fprintf(data->json_file, "}\n"); - if (json_file == stdout) { - fflush(stdout); /* stdout is by default fully-buffered when not to a terminal */ - } - else if (json_file != NULL) { - fclose(json_file); + if (data->json_file) { + fflush(data->json_file); } return 0; } + int ipfix_export_drecord_db( ipfixs_node_t *s, ipfixt_node_t *t, ipfix_datarecord_t *d, @@ -375,21 +373,39 @@ int ipfix_export_init_db( char *dbhost, char *dbuser, } data->json_filename = opt_jsonfile; + data->json_file = NULL; *arg = (void**)data; return 0; } +void ipfix_col_db_reload( void ) +{ + ipfixe_data_db_t *data = g_colinfo->data; + + if (data->json_file != NULL && data->json_file != stdout) { + fclose(data->json_file); + data->json_file = NULL; + /* It will get reopened when it next receives data */ + } +} + void ipfix_export_cleanup_db( void *arg ) { ipfixe_data_db_t *data = (ipfixe_data_db_t*)arg; if ( data ) { + if ( data->json_file ) { + fclose(data->json_file); + /* possible that the above could fail, but not sure what we would do */ + data->json_file = NULL; + } if ( data->mysql ) ipfix_db_close( &(data->mysql) ); free(data); } } + #endif /*----- export funcs -----------------------------------------------------*/ diff --git a/lib/ipfix_col_db.h b/lib/ipfix_col_db.h index 202c6bb..fb8b6e8 100644 --- a/lib/ipfix_col_db.h +++ b/lib/ipfix_col_db.h @@ -28,6 +28,7 @@ int ipfix_export_init_db( char *dbhost, char *dbuser, char *dbpw, char *dbname, char *opt_jsonfile, void **data ); +void ipfix_col_db_reload( void ); #ifdef __cplusplus } diff --git a/lib/ipfix_col_files.c b/lib/ipfix_col_files.c index 4cbc8f5..732b940 100644 --- a/lib/ipfix_col_files.c +++ b/lib/ipfix_col_files.c @@ -39,8 +39,6 @@ typedef struct ipfix_export_data_file /*------ globals ---------------------------------------------------------*/ -static ipfix_col_info_t *g_colinfo =NULL; - /*----- revision id ------------------------------------------------------*/ static const char cvsid[]="$Id: ipfix_col_files.c 96 2009-03-27 19:19:27Z csc $"; diff --git a/lib/ipfix_print.c b/lib/ipfix_print.c index b3288fa..b9eb780 100644 --- a/lib/ipfix_print.c +++ b/lib/ipfix_print.c @@ -38,7 +38,6 @@ static const char cvsid[]="$Id: ipfix_print.c 996 2009-03-19 18:14:44Z csc $"; /*----- globals ----------------------------------------------------------*/ -static ipfix_col_info_t *g_colinfo =NULL; static char tmpbuf[1000]; static void outf( FILE *fp, From 15d180b46022d71b740eed7c29d95ce55d023fd2 Mon Sep 17 00:00:00 2001 From: Cameron Kerr Date: Thu, 26 Feb 2015 11:58:09 +1300 Subject: [PATCH 25/48] Added a beginning of a template name mapping input file --- lib/template_mappings.txt | 14 ++++++++++++++ 1 file changed, 14 insertions(+) create mode 100644 lib/template_mappings.txt diff --git a/lib/template_mappings.txt b/lib/template_mappings.txt new file mode 100644 index 0000000..39ebbef --- /dev/null +++ b/lib/template_mappings.txt @@ -0,0 
+1,14 @@ +#PEN|Template_ID|Template_name +5951|271|ICA Session Setup +5951|274|ICA App Launch +5951|272|ICA Network Update +5951|608|ICA Stream Update +5951|273|ICA Session (Channel) Update +5951|275|ICA App Terminate +5951|604|WanOp Accelerate TCP +5951|605|WanOp Un-accelerated TCP +5951|606|UDPv4 +5951|256|TCPv4 Ingress +5951|257|TCPv4 Egress +5951|265|TCP appId and appName mapping + From 0efca83d7cf34c02306e78221cab7ab81d58525a Mon Sep 17 00:00:00 2001 From: Cameron Kerr Date: Fri, 27 Feb 2015 12:59:53 +1300 Subject: [PATCH 26/48] Divorce the JSON collector from the MySQL collector. This undoes some previous work, and also fixes up some minor autoconf issues for RHEL6-based builders. Resolves #20 Completely divorce the JSON emitter from MySQL, making a separate collector --- .gitignore | 1 + collector/collector.c | 79 +- configure | 5429 +++++++++++++++++++++---------- configure.ac | 20 +- examples/example_collector_db.c | 5 +- lib/Makefile.in | 6 +- lib/ipfix_col.c | 11 +- lib/ipfix_col.h | 12 +- lib/ipfix_col_db.c | 152 +- lib/ipfix_col_db.h | 4 +- lib/ipfix_col_jsonlines.c | 262 ++ lib/ipfix_col_jsonlines.h | 28 + lib/ipfix_jsonlines.c | 17 + lib/ipfix_jsonlines.h | 13 + 14 files changed, 4070 insertions(+), 1969 deletions(-) create mode 100644 lib/ipfix_col_jsonlines.c create mode 100644 lib/ipfix_col_jsonlines.h create mode 100644 lib/ipfix_jsonlines.c create mode 100644 lib/ipfix_jsonlines.h diff --git a/.gitignore b/.gitignore index 78ec83b..40e5cba 100644 --- a/.gitignore +++ b/.gitignore @@ -19,3 +19,4 @@ config.log libipfix.pc netscaler_ipfix.pw data.json +autom4te.cache diff --git a/collector/collector.c b/collector/collector.c index 60f2060..cdf19d1 100644 --- a/collector/collector.c +++ b/collector/collector.c @@ -39,6 +39,9 @@ #ifdef DBSUPPORT #include "ipfix_db.h" #endif +#ifdef JSONLINESSUPPORT +# include "ipfix_jsonlines.h" +#endif #include "ipfix_col.h" #include "ipfix_def_fokus.h" #include "ipfix_fields_fokus.h" @@ -71,6 +74,8 @@ typedef struct ipfix_collector_opts char *dbpw_filename; /* db password from file */ char *dbname; /* db name */ char *dbhost; /* hostname */ + + int jsonexport; /* flag */ char *jsonfile; /* filename */ int udp; /* support udp clients */ @@ -114,6 +119,11 @@ static void usage( char *taskname) " -t support TCP clients\n" " -u support UDP clients\n" " -v increase verbose level\n" +#ifdef JSONLINESSUPPORT + "jsonlines options:\n" + " --json export JSON to a file; one JSON doc/line\n" + " --jsonfile file to append to, or '-' for stdout\n" +#endif #ifdef DBSUPPORT #ifdef HAVE_GETOPT_LONG "db options:\n" @@ -123,7 +133,6 @@ static void usage( char *taskname) " --dbuser db user\n" " --dbpw db password\n" " --dbpw-filename db password from first line of file\n" - " --jsonfile templates to db; data as JSON lines\n" #else " -d export into database\n" #endif @@ -175,6 +184,9 @@ void exit_func ( int retval ) if ( par.datadir ) ipfix_col_stop_fileexport(); #ifdef DBSUPPORT if ( par.dbexport ) ipfix_col_stop_mysqlexport(); +#endif +#ifdef JSONLINESSUPPORT + if ( par.jsonexport ) ipfix_col_stop_jsonlinesexport(); #endif (void) ipfix_col_stop_msglog(); ipfix_col_cleanup(); @@ -194,9 +206,11 @@ void sig_func( int signo ) void sig_hup( int signo ) { if ( verbose_level ) - fprintf( stderr, "\n[%s] got SIGHUP, reopening JSON file if opened\n", par.progname ); + fprintf( stderr, "\n[%s] got SIGHUP, giving collectors a chance to react\n", par.progname ); - ipfix_col_reload(); +#ifdef JSONLINESSUPPORT + ipfix_col_reload_jsonlinesexport(); +#endif } int 
do_collect() @@ -219,12 +233,20 @@ int do_collect() #ifdef DBSUPPORT if ( par.dbexport ) { if ( ipfix_col_init_mysqlexport( par.dbhost, par.dbuser, - par.dbpw, par.dbname, par.jsonfile ) <0 ) { + par.dbpw, par.dbname ) <0 ) { mlogf( 0, "[%s] cannot connect to database\n", par.progname ); return -1; } } #endif +#ifdef JSONLINESSUPPORT + if ( par.jsonexport ) { + if ( ipfix_col_init_jsonlinesexport( par.jsonfile ) < 0 ) { + mlogf( 0, "[%s] cannot use jsonlines (WHY?)\n", par.progname ); + return -1; + } + } +#endif /** open ipfix collector port(s) */ @@ -338,19 +360,20 @@ int main (int argc, char *argv[]) char opt[] = "64stuhl:p:vo:"; #ifdef HAVE_GETOPT_LONG struct option lopt[] = { + { "db", 0, 0, 0}, { "dbhost", 1, 0, 0}, { "dbname", 1, 0, 0}, { "dbuser", 1, 0, 0}, { "dbpw", 1, 0, 0}, - { "db", 0, 0, 0}, + { "dbpw-filename", 1, 0, 0}, { "ssl", 0, 0, 0}, { "key", 1, 0, 0}, { "cert", 1, 0, 0}, { "cafile", 1, 0, 0}, { "cadir", 1, 0, 0}, { "help", 0, 0, 0}, + { "json", 0, 0, 0}, { "jsonfile", 1, 0, 0}, - { "dbpw-filename", 1, 0, 0}, { 0, 0, 0, 0 } }; #endif @@ -376,7 +399,8 @@ int main (int argc, char *argv[]) par.dbuser = DFLT_MYSQL_USER; par.dbpw = DFLT_MYSQL_PASSWORD; par.dbpw_filename = NULL; - par.jsonfile = NULL; + par.jsonexport = 0; + par.jsonfile = "-"; snprintf( par.progname, sizeof(par.progname), "%s", basename( argv[0]) ); @@ -392,44 +416,47 @@ int main (int argc, char *argv[]) { case 0: switch (loptidx) { - case 0: /* dbhost */ + case 0: /* db */ + par.dbexport = 1; + break; + case 1: /* dbhost */ par.dbhost = optarg; break; - case 1: /* dbname */ + case 2: /* dbname */ par.dbname = optarg; break; - case 2: /* dbuser */ + case 3: /* dbuser */ par.dbuser = optarg; break; - case 3: /* dbpw */ + case 4: /* dbpw */ par.dbpw = optarg; break; - case 4: /* db */ - par.dbexport = 1; + case 5: /* dbpw-filename */ + par.dbpw_filename = optarg; break; - case 5: /* ssl */ + case 6: /* ssl */ par.ssl = 1; break; - case 6: /* key */ + case 7: /* key */ par.keyfile = optarg; break; - case 7: /* cert */ + case 8: /* cert */ par.certfile = optarg; break; - case 8: /* cafile */ + case 9: /* cafile */ par.cafile = optarg; break; - case 9: /* cadir */ + case 10: /* cadir */ par.cadir = optarg; break; - case 10: /* help */ + case 11: /* help */ usage(par.progname); exit(1); - case 11: /* jsonfile */ - par.jsonfile = optarg; + case 12: /* json */ + par.jsonexport = 1; break; - case 12: /* dbpw-filename */ - par.dbpw_filename = optarg; + case 13: /* jsonfile */ + par.jsonfile = optarg; break; } break; @@ -495,7 +522,7 @@ int main (int argc, char *argv[]) if ( !par.udp && !par.tcp && !par.sctp ) par.tcp++; - if ( !par.dbexport && !par.datadir ) { + if ( !par.dbexport && !par.datadir && !par.jsonexport ) { fprintf( stderr, "info: message dump, no data storage.\n" ); fflush( stderr ); } @@ -529,8 +556,8 @@ int main (int argc, char *argv[]) par.progname, par.port, par.dbexport?"database":par.datadir?"files":"stdout" ); - if ( par.dbexport && par.jsonfile ) { - mlogf(1, "[%s] templates go to database, data goes to file %s as one JSON document per line\n", + if ( par.jsonexport ) { + mlogf(1, "[%s] data goes to file %s as one JSON document per line\n", par.progname, par.jsonfile); } diff --git a/configure b/configure index 2f30a13..10f2f07 100755 --- a/configure +++ b/configure @@ -1,22 +1,18 @@ #! /bin/sh # Guess values for system-dependent variables and create Makefiles. -# Generated by GNU Autoconf 2.65 for libipfix 0.8.2 . -# +# Generated by GNU Autoconf 2.63 for libipfix 0.8.2 . 
# # Copyright (C) 1992, 1993, 1994, 1995, 1996, 1998, 1999, 2000, 2001, -# 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009 Free Software Foundation, -# Inc. -# -# +# 2002, 2003, 2004, 2005, 2006, 2007, 2008 Free Software Foundation, Inc. # This configure script is free software; the Free Software Foundation # gives unlimited permission to copy, distribute and modify it. -## -------------------- ## -## M4sh Initialization. ## -## -------------------- ## +## --------------------- ## +## M4sh Initialization. ## +## --------------------- ## # Be more Bourne compatible DUALCASE=1; export DUALCASE # for MKS sh -if test -n "${ZSH_VERSION+set}" && (emulate sh) >/dev/null 2>&1; then : +if test -n "${ZSH_VERSION+set}" && (emulate sh) >/dev/null 2>&1; then emulate sh NULLCMD=: # Pre-4.2 versions of Zsh do word splitting on ${1+"$@"}, which @@ -24,15 +20,23 @@ if test -n "${ZSH_VERSION+set}" && (emulate sh) >/dev/null 2>&1; then : alias -g '${1+"$@"}'='"$@"' setopt NO_GLOB_SUBST else - case `(set -o) 2>/dev/null` in #( - *posix*) : - set -o posix ;; #( - *) : - ;; + case `(set -o) 2>/dev/null` in + *posix*) set -o posix ;; esac + fi + + +# PATH needs CR +# Avoid depending upon Character Ranges. +as_cr_letters='abcdefghijklmnopqrstuvwxyz' +as_cr_LETTERS='ABCDEFGHIJKLMNOPQRSTUVWXYZ' +as_cr_Letters=$as_cr_letters$as_cr_LETTERS +as_cr_digits='0123456789' +as_cr_alnum=$as_cr_Letters$as_cr_digits + as_nl=' ' export as_nl @@ -40,13 +44,7 @@ export as_nl as_echo='\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\' as_echo=$as_echo$as_echo$as_echo$as_echo$as_echo as_echo=$as_echo$as_echo$as_echo$as_echo$as_echo$as_echo -# Prefer a ksh shell builtin over an external printf program on Solaris, -# but without wasting forks for bash or zsh. -if test -z "$BASH_VERSION$ZSH_VERSION" \ - && (test "X`print -r -- $as_echo`" = "X$as_echo") 2>/dev/null; then - as_echo='print -r --' - as_echo_n='print -rn --' -elif (test "X`printf %s $as_echo`" = "X$as_echo") 2>/dev/null; then +if (test "X`printf %s $as_echo`" = "X$as_echo") 2>/dev/null; then as_echo='printf %s\n' as_echo_n='printf %s' else @@ -57,7 +55,7 @@ else as_echo_body='eval expr "X$1" : "X\\(.*\\)"' as_echo_n_body='eval arg=$1; - case $arg in #( + case $arg in *"$as_nl"*) expr "X$arg" : "X\\(.*\\)$as_nl"; arg=`expr "X$arg" : ".*$as_nl\\(.*\\)"`;; @@ -80,6 +78,13 @@ if test "${PATH_SEPARATOR+set}" != set; then } fi +# Support unset when possible. +if ( (MAIL=60; unset MAIL) || exit) >/dev/null 2>&1; then + as_unset=unset +else + as_unset=false +fi + # IFS # We need space, tab and new line, in precisely that order. Quoting is @@ -89,15 +94,15 @@ fi IFS=" "" $as_nl" # Find who we are. Look in the path if we contain no directory separator. -case $0 in #(( +case $0 in *[\\/]* ) as_myself=$0 ;; *) as_save_IFS=$IFS; IFS=$PATH_SEPARATOR for as_dir in $PATH do IFS=$as_save_IFS test -z "$as_dir" && as_dir=. - test -r "$as_dir/$0" && as_myself=$as_dir/$0 && break - done + test -r "$as_dir/$0" && as_myself=$as_dir/$0 && break +done IFS=$as_save_IFS ;; @@ -109,16 +114,12 @@ if test "x$as_myself" = x; then fi if test ! -f "$as_myself"; then $as_echo "$as_myself: error: cannot find myself; rerun with an absolute file name" >&2 - exit 1 + { (exit 1); exit 1; } fi -# Unset variables that we do not need and which cause bugs (e.g. in -# pre-3.0 UWIN ksh). But do not cause bugs in bash 2.01; the "|| exit 1" -# suppresses any "Segmentation fault" message there. '((' could -# trigger a bug in pdksh 5.2.14. 
-for as_var in BASH_ENV ENV MAIL MAILPATH -do eval test x\${$as_var+set} = xset \ - && ( (unset $as_var) || exit 1) >/dev/null 2>&1 && unset $as_var || : +# Work around bugs in pre-3.0 UWIN ksh. +for as_var in ENV MAIL MAILPATH +do ($as_unset $as_var) >/dev/null 2>&1 && $as_unset $as_var done PS1='$ ' PS2='> ' @@ -130,248 +131,7 @@ export LC_ALL LANGUAGE=C export LANGUAGE -# CDPATH. -(unset CDPATH) >/dev/null 2>&1 && unset CDPATH - -if test "x$CONFIG_SHELL" = x; then - as_bourne_compatible="if test -n \"\${ZSH_VERSION+set}\" && (emulate sh) >/dev/null 2>&1; then : - emulate sh - NULLCMD=: - # Pre-4.2 versions of Zsh do word splitting on \${1+\"\$@\"}, which - # is contrary to our usage. Disable this feature. - alias -g '\${1+\"\$@\"}'='\"\$@\"' - setopt NO_GLOB_SUBST -else - case \`(set -o) 2>/dev/null\` in #( - *posix*) : - set -o posix ;; #( - *) : - ;; -esac -fi -" - as_required="as_fn_return () { (exit \$1); } -as_fn_success () { as_fn_return 0; } -as_fn_failure () { as_fn_return 1; } -as_fn_ret_success () { return 0; } -as_fn_ret_failure () { return 1; } - -exitcode=0 -as_fn_success || { exitcode=1; echo as_fn_success failed.; } -as_fn_failure && { exitcode=1; echo as_fn_failure succeeded.; } -as_fn_ret_success || { exitcode=1; echo as_fn_ret_success failed.; } -as_fn_ret_failure && { exitcode=1; echo as_fn_ret_failure succeeded.; } -if ( set x; as_fn_ret_success y && test x = \"\$1\" ); then : - -else - exitcode=1; echo positional parameters were not saved. -fi -test x\$exitcode = x0 || exit 1" - as_suggested=" as_lineno_1=";as_suggested=$as_suggested$LINENO;as_suggested=$as_suggested" as_lineno_1a=\$LINENO - as_lineno_2=";as_suggested=$as_suggested$LINENO;as_suggested=$as_suggested" as_lineno_2a=\$LINENO - eval 'test \"x\$as_lineno_1'\$as_run'\" != \"x\$as_lineno_2'\$as_run'\" && - test \"x\`expr \$as_lineno_1'\$as_run' + 1\`\" = \"x\$as_lineno_2'\$as_run'\"' || exit 1 -test \$(( 1 + 1 )) = 2 || exit 1" - if (eval "$as_required") 2>/dev/null; then : - as_have_required=yes -else - as_have_required=no -fi - if test x$as_have_required = xyes && (eval "$as_suggested") 2>/dev/null; then : - -else - as_save_IFS=$IFS; IFS=$PATH_SEPARATOR -as_found=false -for as_dir in /bin$PATH_SEPARATOR/usr/bin$PATH_SEPARATOR$PATH -do - IFS=$as_save_IFS - test -z "$as_dir" && as_dir=. - as_found=: - case $as_dir in #( - /*) - for as_base in sh bash ksh sh5; do - # Try only shells that exist, to save several forks. - as_shell=$as_dir/$as_base - if { test -f "$as_shell" || test -f "$as_shell.exe"; } && - { $as_echo "$as_bourne_compatible""$as_required" | as_run=a "$as_shell"; } 2>/dev/null; then : - CONFIG_SHELL=$as_shell as_have_required=yes - if { $as_echo "$as_bourne_compatible""$as_suggested" | as_run=a "$as_shell"; } 2>/dev/null; then : - break 2 -fi -fi - done;; - esac - as_found=false -done -$as_found || { if { test -f "$SHELL" || test -f "$SHELL.exe"; } && - { $as_echo "$as_bourne_compatible""$as_required" | as_run=a "$SHELL"; } 2>/dev/null; then : - CONFIG_SHELL=$SHELL as_have_required=yes -fi; } -IFS=$as_save_IFS - - - if test "x$CONFIG_SHELL" != x; then : - # We cannot yet assume a decent shell, so we have to provide a - # neutralization value for shells without unset; and this also - # works around shells that cannot unset nonexistent variables. 
- BASH_ENV=/dev/null - ENV=/dev/null - (unset BASH_ENV) >/dev/null 2>&1 && unset BASH_ENV ENV - export CONFIG_SHELL - exec "$CONFIG_SHELL" "$as_myself" ${1+"$@"} -fi - - if test x$as_have_required = xno; then : - $as_echo "$0: This script requires a shell more modern than all" - $as_echo "$0: the shells that I found on your system." - if test x${ZSH_VERSION+set} = xset ; then - $as_echo "$0: In particular, zsh $ZSH_VERSION has bugs and should" - $as_echo "$0: be upgraded to zsh 4.3.4 or later." - else - $as_echo "$0: Please tell bug-autoconf@gnu.org about your system, -$0: including any error possibly output before this -$0: message. Then install a modern shell, or manually run -$0: the script under such a shell if you do have one." - fi - exit 1 -fi -fi -fi -SHELL=${CONFIG_SHELL-/bin/sh} -export SHELL -# Unset more variables known to interfere with behavior of common tools. -CLICOLOR_FORCE= GREP_OPTIONS= -unset CLICOLOR_FORCE GREP_OPTIONS - -## --------------------- ## -## M4sh Shell Functions. ## -## --------------------- ## -# as_fn_unset VAR -# --------------- -# Portably unset VAR. -as_fn_unset () -{ - { eval $1=; unset $1;} -} -as_unset=as_fn_unset - -# as_fn_set_status STATUS -# ----------------------- -# Set $? to STATUS, without forking. -as_fn_set_status () -{ - return $1 -} # as_fn_set_status - -# as_fn_exit STATUS -# ----------------- -# Exit the shell with STATUS, even in a "trap 0" or "set -e" context. -as_fn_exit () -{ - set +e - as_fn_set_status $1 - exit $1 -} # as_fn_exit - -# as_fn_mkdir_p -# ------------- -# Create "$as_dir" as a directory, including parents if necessary. -as_fn_mkdir_p () -{ - - case $as_dir in #( - -*) as_dir=./$as_dir;; - esac - test -d "$as_dir" || eval $as_mkdir_p || { - as_dirs= - while :; do - case $as_dir in #( - *\'*) as_qdir=`$as_echo "$as_dir" | sed "s/'/'\\\\\\\\''/g"`;; #'( - *) as_qdir=$as_dir;; - esac - as_dirs="'$as_qdir' $as_dirs" - as_dir=`$as_dirname -- "$as_dir" || -$as_expr X"$as_dir" : 'X\(.*[^/]\)//*[^/][^/]*/*$' \| \ - X"$as_dir" : 'X\(//\)[^/]' \| \ - X"$as_dir" : 'X\(//\)$' \| \ - X"$as_dir" : 'X\(/\)' \| . 2>/dev/null || -$as_echo X"$as_dir" | - sed '/^X\(.*[^/]\)\/\/*[^/][^/]*\/*$/{ - s//\1/ - q - } - /^X\(\/\/\)[^/].*/{ - s//\1/ - q - } - /^X\(\/\/\)$/{ - s//\1/ - q - } - /^X\(\/\).*/{ - s//\1/ - q - } - s/.*/./; q'` - test -d "$as_dir" && break - done - test -z "$as_dirs" || eval "mkdir $as_dirs" - } || test -d "$as_dir" || as_fn_error "cannot create directory $as_dir" - - -} # as_fn_mkdir_p -# as_fn_append VAR VALUE -# ---------------------- -# Append the text in VALUE to the end of the definition contained in VAR. Take -# advantage of any shell optimizations that allow amortized linear growth over -# repeated appends, instead of the typical quadratic growth present in naive -# implementations. -if (eval "as_var=1; as_var+=2; test x\$as_var = x12") 2>/dev/null; then : - eval 'as_fn_append () - { - eval $1+=\$2 - }' -else - as_fn_append () - { - eval $1=\$$1\$2 - } -fi # as_fn_append - -# as_fn_arith ARG... -# ------------------ -# Perform arithmetic evaluation on the ARGs, and store the result in the -# global $as_val. Take advantage of shells that can avoid forks. The arguments -# must be portable across $(()) and expr. -if (eval "test \$(( 1 + 1 )) = 2") 2>/dev/null; then : - eval 'as_fn_arith () - { - as_val=$(( $* )) - }' -else - as_fn_arith () - { - as_val=`expr "$@" || test $? 
-eq 1` - } -fi # as_fn_arith - - -# as_fn_error ERROR [LINENO LOG_FD] -# --------------------------------- -# Output "`basename $0`: error: ERROR" to stderr. If LINENO and LOG_FD are -# provided, also output the error to LOG_FD, referencing LINENO. Then exit the -# script with status $?, using 1 if that was 0. -as_fn_error () -{ - as_status=$?; test $as_status -eq 0 && as_status=1 - if test "$3"; then - as_lineno=${as_lineno-"$2"} as_lineno_stack=as_lineno_stack=$as_lineno_stack - $as_echo "$as_me:${as_lineno-$LINENO}: error: $1" >&$3 - fi - $as_echo "$as_me: error: $1" >&2 - as_fn_exit $as_status -} # as_fn_error - +# Required to use basename. if expr a : '\(a\)' >/dev/null 2>&1 && test "X`expr 00001 : '.*\(...\)'`" = X001; then as_expr=expr @@ -385,12 +145,8 @@ else as_basename=false fi -if (as_dir=`dirname -- /` && test "X$as_dir" = X/) >/dev/null 2>&1; then - as_dirname=dirname -else - as_dirname=false -fi +# Name of the executable. as_me=`$as_basename -- "$0" || $as_expr X/"$0" : '.*/\([^/][^/]*\)/*$' \| \ X"$0" : 'X\(//\)$' \| \ @@ -410,126 +166,414 @@ $as_echo X/"$0" | } s/.*/./; q'` -# Avoid depending upon Character Ranges. -as_cr_letters='abcdefghijklmnopqrstuvwxyz' -as_cr_LETTERS='ABCDEFGHIJKLMNOPQRSTUVWXYZ' -as_cr_Letters=$as_cr_letters$as_cr_LETTERS -as_cr_digits='0123456789' -as_cr_alnum=$as_cr_Letters$as_cr_digits +# CDPATH. +$as_unset CDPATH - as_lineno_1=$LINENO as_lineno_1a=$LINENO - as_lineno_2=$LINENO as_lineno_2a=$LINENO - eval 'test "x$as_lineno_1'$as_run'" != "x$as_lineno_2'$as_run'" && - test "x`expr $as_lineno_1'$as_run' + 1`" = "x$as_lineno_2'$as_run'"' || { - # Blame Lee E. McMahon (1931-1989) for sed's syntax. :-) - sed -n ' - p - /[$]LINENO/= - ' <$as_myself | - sed ' - s/[$]LINENO.*/&-/ - t lineno - b - :lineno - N - :loop - s/[$]LINENO\([^'$as_cr_alnum'_].*\n\)\(.*\)/\2\1\2/ - t loop - s/-\n.*// - ' >$as_me.lineno && - chmod +x "$as_me.lineno" || - { $as_echo "$as_me: error: cannot create $as_me.lineno; rerun with a POSIX shell" >&2; as_fn_exit 1; } +if test "x$CONFIG_SHELL" = x; then + if (eval ":") 2>/dev/null; then + as_have_required=yes +else + as_have_required=no +fi - # Don't try to exec as it changes $[0], causing all sort of problems - # (the dirname of $[0] is not the place where we might find the - # original and so on. Autoconf is especially sensitive to this). - . "./$as_me.lineno" - # Exit status is that of the last command. - exit + if test $as_have_required = yes && (eval ": +(as_func_return () { + (exit \$1) +} +as_func_success () { + as_func_return 0 +} +as_func_failure () { + as_func_return 1 +} +as_func_ret_success () { + return 0 +} +as_func_ret_failure () { + return 1 } -ECHO_C= ECHO_N= ECHO_T= -case `echo -n x` in #((((( --n*) - case `echo 'xy\c'` in - *c*) ECHO_T=' ';; # ECHO_T is single tab character. - xy) ECHO_C='\c';; - *) echo `echo ksh88 bug on AIX 6.1` > /dev/null - ECHO_T=' ';; - esac;; -*) - ECHO_N='-n';; -esac - -rm -f conf$$ conf$$.exe conf$$.file -if test -d conf$$.dir; then - rm -f conf$$.dir/conf$$.file +exitcode=0 +if as_func_success; then + : else - rm -f conf$$.dir - mkdir conf$$.dir 2>/dev/null + exitcode=1 + echo as_func_success failed. fi -if (echo >conf$$.file) 2>/dev/null; then - if ln -s conf$$.file conf$$ 2>/dev/null; then - as_ln_s='ln -s' - # ... but there are two gotchas: - # 1) On MSYS, both `ln -s file dir' and `ln file dir' fail. - # 2) DJGPP < 2.04 has no symlinks; `ln -s' creates a wrapper executable. - # In both cases, we have to default to `cp -p'. - ln -s conf$$.file conf$$.dir 2>/dev/null && test ! 
-f conf$$.exe || - as_ln_s='cp -p' - elif ln conf$$.file conf$$ 2>/dev/null; then - as_ln_s=ln - else - as_ln_s='cp -p' - fi -else - as_ln_s='cp -p' + +if as_func_failure; then + exitcode=1 + echo as_func_failure succeeded. fi -rm -f conf$$ conf$$.exe conf$$.dir/conf$$.file conf$$.file -rmdir conf$$.dir 2>/dev/null -if mkdir -p . 2>/dev/null; then - as_mkdir_p='mkdir -p "$as_dir"' +if as_func_ret_success; then + : else - test -d ./-p && rmdir ./-p - as_mkdir_p=false + exitcode=1 + echo as_func_ret_success failed. fi -if test -x / >/dev/null 2>&1; then - as_test_x='test -x' +if as_func_ret_failure; then + exitcode=1 + echo as_func_ret_failure succeeded. +fi + +if ( set x; as_func_ret_success y && test x = \"\$1\" ); then + : else - if ls -dL / >/dev/null 2>&1; then - as_ls_L_option=L - else - as_ls_L_option= - fi - as_test_x=' - eval sh -c '\'' - if test -d "$1"; then - test -d "$1/."; - else - case $1 in #( - -*)set "./$1";; - esac; - case `ls -ld'$as_ls_L_option' "$1" 2>/dev/null` in #(( - ???[sx]*):;;*)false;;esac;fi - '\'' sh - ' + exitcode=1 + echo positional parameters were not saved. fi -as_executable_p=$as_test_x -# Sed expression to map a string onto a valid CPP name. -as_tr_cpp="eval sed 'y%*$as_cr_letters%P$as_cr_LETTERS%;s%[^_$as_cr_alnum]%_%g'" +test \$exitcode = 0) || { (exit 1); exit 1; } -# Sed expression to map a string onto a valid variable name. -as_tr_sh="eval sed 'y%*+%pp%;s%[^_$as_cr_alnum]%_%g'" +( + as_lineno_1=\$LINENO + as_lineno_2=\$LINENO + test \"x\$as_lineno_1\" != \"x\$as_lineno_2\" && + test \"x\`expr \$as_lineno_1 + 1\`\" = \"x\$as_lineno_2\") || { (exit 1); exit 1; } +") 2> /dev/null; then + : +else + as_candidate_shells= + as_save_IFS=$IFS; IFS=$PATH_SEPARATOR +for as_dir in /bin$PATH_SEPARATOR/usr/bin$PATH_SEPARATOR$PATH +do + IFS=$as_save_IFS + test -z "$as_dir" && as_dir=. + case $as_dir in + /*) + for as_base in sh bash ksh sh5; do + as_candidate_shells="$as_candidate_shells $as_dir/$as_base" + done;; + esac +done +IFS=$as_save_IFS -test -n "$DJDIR" || exec 7<&0 &1 + for as_shell in $as_candidate_shells $SHELL; do + # Try only shells that exist, to save several forks. + if { test -f "$as_shell" || test -f "$as_shell.exe"; } && + { ("$as_shell") 2> /dev/null <<\_ASEOF +if test -n "${ZSH_VERSION+set}" && (emulate sh) >/dev/null 2>&1; then + emulate sh + NULLCMD=: + # Pre-4.2 versions of Zsh do word splitting on ${1+"$@"}, which + # is contrary to our usage. Disable this feature. + alias -g '${1+"$@"}'='"$@"' + setopt NO_GLOB_SUBST +else + case `(set -o) 2>/dev/null` in + *posix*) set -o posix ;; +esac -# Name of the host. +fi + + +: +_ASEOF +}; then + CONFIG_SHELL=$as_shell + as_have_required=yes + if { "$as_shell" 2> /dev/null <<\_ASEOF +if test -n "${ZSH_VERSION+set}" && (emulate sh) >/dev/null 2>&1; then + emulate sh + NULLCMD=: + # Pre-4.2 versions of Zsh do word splitting on ${1+"$@"}, which + # is contrary to our usage. Disable this feature. + alias -g '${1+"$@"}'='"$@"' + setopt NO_GLOB_SUBST +else + case `(set -o) 2>/dev/null` in + *posix*) set -o posix ;; +esac + +fi + + +: +(as_func_return () { + (exit $1) +} +as_func_success () { + as_func_return 0 +} +as_func_failure () { + as_func_return 1 +} +as_func_ret_success () { + return 0 +} +as_func_ret_failure () { + return 1 +} + +exitcode=0 +if as_func_success; then + : +else + exitcode=1 + echo as_func_success failed. +fi + +if as_func_failure; then + exitcode=1 + echo as_func_failure succeeded. +fi + +if as_func_ret_success; then + : +else + exitcode=1 + echo as_func_ret_success failed. 
+fi + +if as_func_ret_failure; then + exitcode=1 + echo as_func_ret_failure succeeded. +fi + +if ( set x; as_func_ret_success y && test x = "$1" ); then + : +else + exitcode=1 + echo positional parameters were not saved. +fi + +test $exitcode = 0) || { (exit 1); exit 1; } + +( + as_lineno_1=$LINENO + as_lineno_2=$LINENO + test "x$as_lineno_1" != "x$as_lineno_2" && + test "x`expr $as_lineno_1 + 1`" = "x$as_lineno_2") || { (exit 1); exit 1; } + +_ASEOF +}; then + break +fi + +fi + + done + + if test "x$CONFIG_SHELL" != x; then + for as_var in BASH_ENV ENV + do ($as_unset $as_var) >/dev/null 2>&1 && $as_unset $as_var + done + export CONFIG_SHELL + exec "$CONFIG_SHELL" "$as_myself" ${1+"$@"} +fi + + + if test $as_have_required = no; then + echo This script requires a shell more modern than all the + echo shells that I found on your system. Please install a + echo modern shell, or manually run the script under such a + echo shell if you do have one. + { (exit 1); exit 1; } +fi + + +fi + +fi + + + +(eval "as_func_return () { + (exit \$1) +} +as_func_success () { + as_func_return 0 +} +as_func_failure () { + as_func_return 1 +} +as_func_ret_success () { + return 0 +} +as_func_ret_failure () { + return 1 +} + +exitcode=0 +if as_func_success; then + : +else + exitcode=1 + echo as_func_success failed. +fi + +if as_func_failure; then + exitcode=1 + echo as_func_failure succeeded. +fi + +if as_func_ret_success; then + : +else + exitcode=1 + echo as_func_ret_success failed. +fi + +if as_func_ret_failure; then + exitcode=1 + echo as_func_ret_failure succeeded. +fi + +if ( set x; as_func_ret_success y && test x = \"\$1\" ); then + : +else + exitcode=1 + echo positional parameters were not saved. +fi + +test \$exitcode = 0") || { + echo No shell found that supports shell functions. + echo Please tell bug-autoconf@gnu.org about your system, + echo including any error possibly output before this message. + echo This can help us improve future autoconf versions. + echo Configuration will now proceed without shell functions. +} + + + + as_lineno_1=$LINENO + as_lineno_2=$LINENO + test "x$as_lineno_1" != "x$as_lineno_2" && + test "x`expr $as_lineno_1 + 1`" = "x$as_lineno_2" || { + + # Create $as_me.lineno as a copy of $as_myself, but with $LINENO + # uniformly replaced by the line number. The first 'sed' inserts a + # line-number line after each line using $LINENO; the second 'sed' + # does the real work. The second script uses 'N' to pair each + # line-number line with the line containing $LINENO, and appends + # trailing '-' during substitution so that $LINENO is not a special + # case at line end. + # (Raja R Harinath suggested sed '=', and Paul Eggert wrote the + # scripts with optimization help from Paolo Bonzini. Blame Lee + # E. McMahon (1931-1989) for sed's syntax. :-) + sed -n ' + p + /[$]LINENO/= + ' <$as_myself | + sed ' + s/[$]LINENO.*/&-/ + t lineno + b + :lineno + N + :loop + s/[$]LINENO\([^'$as_cr_alnum'_].*\n\)\(.*\)/\2\1\2/ + t loop + s/-\n.*// + ' >$as_me.lineno && + chmod +x "$as_me.lineno" || + { $as_echo "$as_me: error: cannot create $as_me.lineno; rerun with a POSIX shell" >&2 + { (exit 1); exit 1; }; } + + # Don't try to exec as it changes $[0], causing all sort of problems + # (the dirname of $[0] is not the place where we might find the + # original and so on. Autoconf is especially sensitive to this). + . "./$as_me.lineno" + # Exit status is that of the last command. 
+ exit +} + + +if (as_dir=`dirname -- /` && test "X$as_dir" = X/) >/dev/null 2>&1; then + as_dirname=dirname +else + as_dirname=false +fi + +ECHO_C= ECHO_N= ECHO_T= +case `echo -n x` in +-n*) + case `echo 'x\c'` in + *c*) ECHO_T=' ';; # ECHO_T is single tab character. + *) ECHO_C='\c';; + esac;; +*) + ECHO_N='-n';; +esac +if expr a : '\(a\)' >/dev/null 2>&1 && + test "X`expr 00001 : '.*\(...\)'`" = X001; then + as_expr=expr +else + as_expr=false +fi + +rm -f conf$$ conf$$.exe conf$$.file +if test -d conf$$.dir; then + rm -f conf$$.dir/conf$$.file +else + rm -f conf$$.dir + mkdir conf$$.dir 2>/dev/null +fi +if (echo >conf$$.file) 2>/dev/null; then + if ln -s conf$$.file conf$$ 2>/dev/null; then + as_ln_s='ln -s' + # ... but there are two gotchas: + # 1) On MSYS, both `ln -s file dir' and `ln file dir' fail. + # 2) DJGPP < 2.04 has no symlinks; `ln -s' creates a wrapper executable. + # In both cases, we have to default to `cp -p'. + ln -s conf$$.file conf$$.dir 2>/dev/null && test ! -f conf$$.exe || + as_ln_s='cp -p' + elif ln conf$$.file conf$$ 2>/dev/null; then + as_ln_s=ln + else + as_ln_s='cp -p' + fi +else + as_ln_s='cp -p' +fi +rm -f conf$$ conf$$.exe conf$$.dir/conf$$.file conf$$.file +rmdir conf$$.dir 2>/dev/null + +if mkdir -p . 2>/dev/null; then + as_mkdir_p=: +else + test -d ./-p && rmdir ./-p + as_mkdir_p=false +fi + +if test -x / >/dev/null 2>&1; then + as_test_x='test -x' +else + if ls -dL / >/dev/null 2>&1; then + as_ls_L_option=L + else + as_ls_L_option= + fi + as_test_x=' + eval sh -c '\'' + if test -d "$1"; then + test -d "$1/."; + else + case $1 in + -*)set "./$1";; + esac; + case `ls -ld'$as_ls_L_option' "$1" 2>/dev/null` in + ???[sx]*):;;*)false;;esac;fi + '\'' sh + ' +fi +as_executable_p=$as_test_x + +# Sed expression to map a string onto a valid CPP name. +as_tr_cpp="eval sed 'y%*$as_cr_letters%P$as_cr_LETTERS%;s%[^_$as_cr_alnum]%_%g'" + +# Sed expression to map a string onto a valid variable name. +as_tr_sh="eval sed 'y%*+%pp%;s%[^_$as_cr_alnum]%_%g'" + + + +exec 7<&0 &1 + +# Name of the host. # hostname on some systems (SVR3.2, Linux) returns a bogus exit status, # so uname gets run too. ac_hostname=`(hostname || uname -n) 2>/dev/null | sed 1q` @@ -545,6 +589,7 @@ cross_compiling=no subdirs= MFLAGS= MAKEFLAGS= +SHELL=${CONFIG_SHELL-/bin/sh} # Identity of this package. PACKAGE_NAME='libipfix' @@ -552,7 +597,6 @@ PACKAGE_TARNAME='libipfix' PACKAGE_VERSION='0.8.2 ' PACKAGE_STRING='libipfix 0.8.2 ' PACKAGE_BUGREPORT='' -PACKAGE_URL='' ac_unique_file="lib/ipfix.h" # Factoring default headers for most tests. @@ -602,6 +646,7 @@ IPFIX_DB_OBJ MYSQLLIBS IPFIX_SSL_OBJ SSLLIBS +IPFIX_JSONLINES_OBJ SCTPLIBS OPENSSL INSTALL_DATA @@ -656,7 +701,6 @@ bindir program_transform_name prefix exec_prefix -PACKAGE_URL PACKAGE_BUGREPORT PACKAGE_STRING PACKAGE_VERSION @@ -669,6 +713,7 @@ ac_user_opts=' enable_option_checking enable_ipv6 enable_sctp +enable_jsonlines with_ssl with_mysql with_pcap @@ -790,7 +835,8 @@ do ac_useropt=`expr "x$ac_option" : 'x-*disable-\(.*\)'` # Reject names that are not valid shell variable names. expr "x$ac_useropt" : ".*[^-+._$as_cr_alnum]" >/dev/null && - as_fn_error "invalid feature name: $ac_useropt" + { $as_echo "$as_me: error: invalid feature name: $ac_useropt" >&2 + { (exit 1); exit 1; }; } ac_useropt_orig=$ac_useropt ac_useropt=`$as_echo "$ac_useropt" | sed 's/[-+.]/_/g'` case $ac_user_opts in @@ -816,7 +862,8 @@ do ac_useropt=`expr "x$ac_option" : 'x-*enable-\([^=]*\)'` # Reject names that are not valid shell variable names. 
expr "x$ac_useropt" : ".*[^-+._$as_cr_alnum]" >/dev/null && - as_fn_error "invalid feature name: $ac_useropt" + { $as_echo "$as_me: error: invalid feature name: $ac_useropt" >&2 + { (exit 1); exit 1; }; } ac_useropt_orig=$ac_useropt ac_useropt=`$as_echo "$ac_useropt" | sed 's/[-+.]/_/g'` case $ac_user_opts in @@ -1020,7 +1067,8 @@ do ac_useropt=`expr "x$ac_option" : 'x-*with-\([^=]*\)'` # Reject names that are not valid shell variable names. expr "x$ac_useropt" : ".*[^-+._$as_cr_alnum]" >/dev/null && - as_fn_error "invalid package name: $ac_useropt" + { $as_echo "$as_me: error: invalid package name: $ac_useropt" >&2 + { (exit 1); exit 1; }; } ac_useropt_orig=$ac_useropt ac_useropt=`$as_echo "$ac_useropt" | sed 's/[-+.]/_/g'` case $ac_user_opts in @@ -1036,7 +1084,8 @@ do ac_useropt=`expr "x$ac_option" : 'x-*without-\(.*\)'` # Reject names that are not valid shell variable names. expr "x$ac_useropt" : ".*[^-+._$as_cr_alnum]" >/dev/null && - as_fn_error "invalid package name: $ac_useropt" + { $as_echo "$as_me: error: invalid package name: $ac_useropt" >&2 + { (exit 1); exit 1; }; } ac_useropt_orig=$ac_useropt ac_useropt=`$as_echo "$ac_useropt" | sed 's/[-+.]/_/g'` case $ac_user_opts in @@ -1066,17 +1115,17 @@ do | --x-librar=* | --x-libra=* | --x-libr=* | --x-lib=* | --x-li=* | --x-l=*) x_libraries=$ac_optarg ;; - -*) as_fn_error "unrecognized option: \`$ac_option' -Try \`$0 --help' for more information." + -*) { $as_echo "$as_me: error: unrecognized option: $ac_option +Try \`$0 --help' for more information." >&2 + { (exit 1); exit 1; }; } ;; *=*) ac_envvar=`expr "x$ac_option" : 'x\([^=]*\)='` # Reject names that are not valid shell variable names. - case $ac_envvar in #( - '' | [0-9]* | *[!_$as_cr_alnum]* ) - as_fn_error "invalid variable name: \`$ac_envvar'" ;; - esac + expr "x$ac_envvar" : ".*[^_$as_cr_alnum]" >/dev/null && + { $as_echo "$as_me: error: invalid variable name: $ac_envvar" >&2 + { (exit 1); exit 1; }; } eval $ac_envvar=\$ac_optarg export $ac_envvar ;; @@ -1093,13 +1142,15 @@ done if test -n "$ac_prev"; then ac_option=--`echo $ac_prev | sed 's/_/-/g'` - as_fn_error "missing argument to $ac_option" + { $as_echo "$as_me: error: missing argument to $ac_option" >&2 + { (exit 1); exit 1; }; } fi if test -n "$ac_unrecognized_opts"; then case $enable_option_checking in no) ;; - fatal) as_fn_error "unrecognized options: $ac_unrecognized_opts" ;; + fatal) { $as_echo "$as_me: error: unrecognized options: $ac_unrecognized_opts" >&2 + { (exit 1); exit 1; }; } ;; *) $as_echo "$as_me: WARNING: unrecognized options: $ac_unrecognized_opts" >&2 ;; esac fi @@ -1122,7 +1173,8 @@ do [\\/$]* | ?:[\\/]* ) continue;; NONE | '' ) case $ac_var in *prefix ) continue;; esac;; esac - as_fn_error "expected an absolute directory name for --$ac_var: $ac_val" + { $as_echo "$as_me: error: expected an absolute directory name for --$ac_var: $ac_val" >&2 + { (exit 1); exit 1; }; } done # There might be people who depend on the old broken behavior: `$host' @@ -1152,9 +1204,11 @@ test "$silent" = yes && exec 6>/dev/null ac_pwd=`pwd` && test -n "$ac_pwd" && ac_ls_di=`ls -di .` && ac_pwd_ls_di=`cd "$ac_pwd" && ls -di .` || - as_fn_error "working directory cannot be determined" + { $as_echo "$as_me: error: working directory cannot be determined" >&2 + { (exit 1); exit 1; }; } test "X$ac_ls_di" = "X$ac_pwd_ls_di" || - as_fn_error "pwd does not report name of working directory" + { $as_echo "$as_me: error: pwd does not report name of working directory" >&2 + { (exit 1); exit 1; }; } # Find the source files, if 
location was not specified. @@ -1193,11 +1247,13 @@ else fi if test ! -r "$srcdir/$ac_unique_file"; then test "$ac_srcdir_defaulted" = yes && srcdir="$ac_confdir or .." - as_fn_error "cannot find sources ($ac_unique_file) in $srcdir" + { $as_echo "$as_me: error: cannot find sources ($ac_unique_file) in $srcdir" >&2 + { (exit 1); exit 1; }; } fi ac_msg="sources are in $srcdir, but \`cd $srcdir' does not work" ac_abs_confdir=`( - cd "$srcdir" && test -r "./$ac_unique_file" || as_fn_error "$ac_msg" + cd "$srcdir" && test -r "./$ac_unique_file" || { $as_echo "$as_me: error: $ac_msg" >&2 + { (exit 1); exit 1; }; } pwd)` # When building in place, set srcdir=. if test "$ac_abs_confdir" = "$ac_pwd"; then @@ -1304,6 +1360,9 @@ Optional Features: --enable-sctp enable sctp support --disable-sctp disable sctp support + --enable-jsonlines enable jsonlines support + --disable-jsonlines disable jsonlines support + Optional Packages: --with-PACKAGE[=ARG] use PACKAGE [ARG=yes] --without-PACKAGE do not use PACKAGE (same as --with-PACKAGE=no) @@ -1318,14 +1377,13 @@ Some influential environment variables: LDFLAGS linker flags, e.g. -L if you have libraries in a nonstandard directory LIBS libraries to pass to the linker, e.g. -l - CPPFLAGS (Objective) C/C++ preprocessor flags, e.g. -I if + CPPFLAGS C/C++/Objective C preprocessor flags, e.g. -I if you have headers in a nonstandard directory CPP C preprocessor Use these variables to override the choices made by `configure' or to help it to find libraries and programs with nonstandard names/locations. -Report bugs to the package provider. _ACEOF ac_status=$? fi @@ -1389,445 +1447,40 @@ test -n "$ac_init_help" && exit $ac_status if $ac_init_version; then cat <<\_ACEOF libipfix configure 0.8.2 -generated by GNU Autoconf 2.65 +generated by GNU Autoconf 2.63 -Copyright (C) 2009 Free Software Foundation, Inc. +Copyright (C) 1992, 1993, 1994, 1995, 1996, 1998, 1999, 2000, 2001, +2002, 2003, 2004, 2005, 2006, 2007, 2008 Free Software Foundation, Inc. This configure script is free software; the Free Software Foundation gives unlimited permission to copy, distribute and modify it. _ACEOF exit fi +cat >config.log <<_ACEOF +This file contains any messages produced by compilers while +running configure, to aid debugging if configure makes a mistake. -## ------------------------ ## -## Autoconf initialization. ## -## ------------------------ ## - -# ac_fn_c_try_compile LINENO -# -------------------------- -# Try to compile conftest.$ac_ext, and return whether this succeeded. -ac_fn_c_try_compile () -{ - as_lineno=${as_lineno-"$1"} as_lineno_stack=as_lineno_stack=$as_lineno_stack - rm -f conftest.$ac_objext - if { { ac_try="$ac_compile" -case "(($ac_try" in - *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; - *) ac_try_echo=$ac_try;; -esac -eval ac_try_echo="\"\$as_me:${as_lineno-$LINENO}: $ac_try_echo\"" -$as_echo "$ac_try_echo"; } >&5 - (eval "$ac_compile") 2>conftest.err - ac_status=$? - if test -s conftest.err; then - grep -v '^ *+' conftest.err >conftest.er1 - cat conftest.er1 >&5 - mv -f conftest.er1 conftest.err - fi - $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 - test $ac_status = 0; } && { - test -z "$ac_c_werror_flag" || - test ! 
-s conftest.err - } && test -s conftest.$ac_objext; then : - ac_retval=0 -else - $as_echo "$as_me: failed program was:" >&5 -sed 's/^/| /' conftest.$ac_ext >&5 - - ac_retval=1 -fi - eval $as_lineno_stack; test "x$as_lineno_stack" = x && { as_lineno=; unset as_lineno;} - as_fn_set_status $ac_retval +It was created by libipfix $as_me 0.8.2 , which was +generated by GNU Autoconf 2.63. Invocation command line was -} # ac_fn_c_try_compile + $ $0 $@ -# ac_fn_c_try_link LINENO -# ----------------------- -# Try to link conftest.$ac_ext, and return whether this succeeded. -ac_fn_c_try_link () +_ACEOF +exec 5>>config.log { - as_lineno=${as_lineno-"$1"} as_lineno_stack=as_lineno_stack=$as_lineno_stack - rm -f conftest.$ac_objext conftest$ac_exeext - if { { ac_try="$ac_link" -case "(($ac_try" in - *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; - *) ac_try_echo=$ac_try;; -esac -eval ac_try_echo="\"\$as_me:${as_lineno-$LINENO}: $ac_try_echo\"" -$as_echo "$ac_try_echo"; } >&5 - (eval "$ac_link") 2>conftest.err - ac_status=$? - if test -s conftest.err; then - grep -v '^ *+' conftest.err >conftest.er1 - cat conftest.er1 >&5 - mv -f conftest.er1 conftest.err - fi - $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 - test $ac_status = 0; } && { - test -z "$ac_c_werror_flag" || - test ! -s conftest.err - } && test -s conftest$ac_exeext && { - test "$cross_compiling" = yes || - $as_test_x conftest$ac_exeext - }; then : - ac_retval=0 -else - $as_echo "$as_me: failed program was:" >&5 -sed 's/^/| /' conftest.$ac_ext >&5 +cat <<_ASUNAME +## --------- ## +## Platform. ## +## --------- ## - ac_retval=1 -fi - # Delete the IPA/IPO (Inter Procedural Analysis/Optimization) information - # created by the PGI compiler (conftest_ipa8_conftest.oo), as it would - # interfere with the next link command; also delete a directory that is - # left behind by Apple's compiler. We do this before executing the actions. - rm -rf conftest.dSYM conftest_ipa8_conftest.oo - eval $as_lineno_stack; test "x$as_lineno_stack" = x && { as_lineno=; unset as_lineno;} - as_fn_set_status $ac_retval +hostname = `(hostname || uname -n) 2>/dev/null | sed 1q` +uname -m = `(uname -m) 2>/dev/null || echo unknown` +uname -r = `(uname -r) 2>/dev/null || echo unknown` +uname -s = `(uname -s) 2>/dev/null || echo unknown` +uname -v = `(uname -v) 2>/dev/null || echo unknown` -} # ac_fn_c_try_link - -# ac_fn_c_check_func LINENO FUNC VAR -# ---------------------------------- -# Tests whether FUNC exists, setting the cache variable VAR accordingly -ac_fn_c_check_func () -{ - as_lineno=${as_lineno-"$1"} as_lineno_stack=as_lineno_stack=$as_lineno_stack - { $as_echo "$as_me:${as_lineno-$LINENO}: checking for $2" >&5 -$as_echo_n "checking for $2... " >&6; } -if { as_var=$3; eval "test \"\${$as_var+set}\" = set"; }; then : - $as_echo_n "(cached) " >&6 -else - cat confdefs.h - <<_ACEOF >conftest.$ac_ext -/* end confdefs.h. */ -/* Define $2 to an innocuous variant, in case declares $2. - For example, HP-UX 11i declares gettimeofday. */ -#define $2 innocuous_$2 - -/* System header to define __stub macros and hopefully few prototypes, - which can conflict with char $2 (); below. - Prefer to if __STDC__ is defined, since - exists even on freestanding compilers. */ - -#ifdef __STDC__ -# include -#else -# include -#endif - -#undef $2 - -/* Override any GCC internal prototype to avoid an error. - Use char because int might match the return type of a GCC - builtin and then its argument prototype would still apply. 
*/ -#ifdef __cplusplus -extern "C" -#endif -char $2 (); -/* The GNU C library defines this for functions which it implements - to always fail with ENOSYS. Some functions are actually named - something starting with __ and the normal name is an alias. */ -#if defined __stub_$2 || defined __stub___$2 -choke me -#endif - -int -main () -{ -return $2 (); - ; - return 0; -} -_ACEOF -if ac_fn_c_try_link "$LINENO"; then : - eval "$3=yes" -else - eval "$3=no" -fi -rm -f core conftest.err conftest.$ac_objext \ - conftest$ac_exeext conftest.$ac_ext -fi -eval ac_res=\$$3 - { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_res" >&5 -$as_echo "$ac_res" >&6; } - eval $as_lineno_stack; test "x$as_lineno_stack" = x && { as_lineno=; unset as_lineno;} - -} # ac_fn_c_check_func - -# ac_fn_c_try_cpp LINENO -# ---------------------- -# Try to preprocess conftest.$ac_ext, and return whether this succeeded. -ac_fn_c_try_cpp () -{ - as_lineno=${as_lineno-"$1"} as_lineno_stack=as_lineno_stack=$as_lineno_stack - if { { ac_try="$ac_cpp conftest.$ac_ext" -case "(($ac_try" in - *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; - *) ac_try_echo=$ac_try;; -esac -eval ac_try_echo="\"\$as_me:${as_lineno-$LINENO}: $ac_try_echo\"" -$as_echo "$ac_try_echo"; } >&5 - (eval "$ac_cpp conftest.$ac_ext") 2>conftest.err - ac_status=$? - if test -s conftest.err; then - grep -v '^ *+' conftest.err >conftest.er1 - cat conftest.er1 >&5 - mv -f conftest.er1 conftest.err - fi - $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 - test $ac_status = 0; } >/dev/null && { - test -z "$ac_c_preproc_warn_flag$ac_c_werror_flag" || - test ! -s conftest.err - }; then : - ac_retval=0 -else - $as_echo "$as_me: failed program was:" >&5 -sed 's/^/| /' conftest.$ac_ext >&5 - - ac_retval=1 -fi - eval $as_lineno_stack; test "x$as_lineno_stack" = x && { as_lineno=; unset as_lineno;} - as_fn_set_status $ac_retval - -} # ac_fn_c_try_cpp - -# ac_fn_c_check_header_mongrel LINENO HEADER VAR INCLUDES -# ------------------------------------------------------- -# Tests whether HEADER exists, giving a warning if it cannot be compiled using -# the include files in INCLUDES and setting the cache variable VAR -# accordingly. -ac_fn_c_check_header_mongrel () -{ - as_lineno=${as_lineno-"$1"} as_lineno_stack=as_lineno_stack=$as_lineno_stack - if { as_var=$3; eval "test \"\${$as_var+set}\" = set"; }; then : - { $as_echo "$as_me:${as_lineno-$LINENO}: checking for $2" >&5 -$as_echo_n "checking for $2... " >&6; } -if { as_var=$3; eval "test \"\${$as_var+set}\" = set"; }; then : - $as_echo_n "(cached) " >&6 -fi -eval ac_res=\$$3 - { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_res" >&5 -$as_echo "$ac_res" >&6; } -else - # Is the header compilable? -{ $as_echo "$as_me:${as_lineno-$LINENO}: checking $2 usability" >&5 -$as_echo_n "checking $2 usability... " >&6; } -cat confdefs.h - <<_ACEOF >conftest.$ac_ext -/* end confdefs.h. */ -$4 -#include <$2> -_ACEOF -if ac_fn_c_try_compile "$LINENO"; then : - ac_header_compiler=yes -else - ac_header_compiler=no -fi -rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext -{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_header_compiler" >&5 -$as_echo "$ac_header_compiler" >&6; } - -# Is the header present? -{ $as_echo "$as_me:${as_lineno-$LINENO}: checking $2 presence" >&5 -$as_echo_n "checking $2 presence... " >&6; } -cat confdefs.h - <<_ACEOF >conftest.$ac_ext -/* end confdefs.h. 
*/ -#include <$2> -_ACEOF -if ac_fn_c_try_cpp "$LINENO"; then : - ac_header_preproc=yes -else - ac_header_preproc=no -fi -rm -f conftest.err conftest.$ac_ext -{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_header_preproc" >&5 -$as_echo "$ac_header_preproc" >&6; } - -# So? What about this header? -case $ac_header_compiler:$ac_header_preproc:$ac_c_preproc_warn_flag in #(( - yes:no: ) - { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: $2: accepted by the compiler, rejected by the preprocessor!" >&5 -$as_echo "$as_me: WARNING: $2: accepted by the compiler, rejected by the preprocessor!" >&2;} - { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: $2: proceeding with the compiler's result" >&5 -$as_echo "$as_me: WARNING: $2: proceeding with the compiler's result" >&2;} - ;; - no:yes:* ) - { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: $2: present but cannot be compiled" >&5 -$as_echo "$as_me: WARNING: $2: present but cannot be compiled" >&2;} - { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: $2: check for missing prerequisite headers?" >&5 -$as_echo "$as_me: WARNING: $2: check for missing prerequisite headers?" >&2;} - { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: $2: see the Autoconf documentation" >&5 -$as_echo "$as_me: WARNING: $2: see the Autoconf documentation" >&2;} - { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: $2: section \"Present But Cannot Be Compiled\"" >&5 -$as_echo "$as_me: WARNING: $2: section \"Present But Cannot Be Compiled\"" >&2;} - { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: $2: proceeding with the compiler's result" >&5 -$as_echo "$as_me: WARNING: $2: proceeding with the compiler's result" >&2;} - ;; -esac - { $as_echo "$as_me:${as_lineno-$LINENO}: checking for $2" >&5 -$as_echo_n "checking for $2... " >&6; } -if { as_var=$3; eval "test \"\${$as_var+set}\" = set"; }; then : - $as_echo_n "(cached) " >&6 -else - eval "$3=\$ac_header_compiler" -fi -eval ac_res=\$$3 - { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_res" >&5 -$as_echo "$ac_res" >&6; } -fi - eval $as_lineno_stack; test "x$as_lineno_stack" = x && { as_lineno=; unset as_lineno;} - -} # ac_fn_c_check_header_mongrel - -# ac_fn_c_try_run LINENO -# ---------------------- -# Try to link conftest.$ac_ext, and return whether this succeeded. Assumes -# that executables *can* be run. -ac_fn_c_try_run () -{ - as_lineno=${as_lineno-"$1"} as_lineno_stack=as_lineno_stack=$as_lineno_stack - if { { ac_try="$ac_link" -case "(($ac_try" in - *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; - *) ac_try_echo=$ac_try;; -esac -eval ac_try_echo="\"\$as_me:${as_lineno-$LINENO}: $ac_try_echo\"" -$as_echo "$ac_try_echo"; } >&5 - (eval "$ac_link") 2>&5 - ac_status=$? - $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 - test $ac_status = 0; } && { ac_try='./conftest$ac_exeext' - { { case "(($ac_try" in - *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; - *) ac_try_echo=$ac_try;; -esac -eval ac_try_echo="\"\$as_me:${as_lineno-$LINENO}: $ac_try_echo\"" -$as_echo "$ac_try_echo"; } >&5 - (eval "$ac_try") 2>&5 - ac_status=$? - $as_echo "$as_me:${as_lineno-$LINENO}: \$? 
= $ac_status" >&5 - test $ac_status = 0; }; }; then : - ac_retval=0 -else - $as_echo "$as_me: program exited with status $ac_status" >&5 - $as_echo "$as_me: failed program was:" >&5 -sed 's/^/| /' conftest.$ac_ext >&5 - - ac_retval=$ac_status -fi - rm -rf conftest.dSYM conftest_ipa8_conftest.oo - eval $as_lineno_stack; test "x$as_lineno_stack" = x && { as_lineno=; unset as_lineno;} - as_fn_set_status $ac_retval - -} # ac_fn_c_try_run - -# ac_fn_c_check_header_compile LINENO HEADER VAR INCLUDES -# ------------------------------------------------------- -# Tests whether HEADER exists and can be compiled using the include files in -# INCLUDES, setting the cache variable VAR accordingly. -ac_fn_c_check_header_compile () -{ - as_lineno=${as_lineno-"$1"} as_lineno_stack=as_lineno_stack=$as_lineno_stack - { $as_echo "$as_me:${as_lineno-$LINENO}: checking for $2" >&5 -$as_echo_n "checking for $2... " >&6; } -if { as_var=$3; eval "test \"\${$as_var+set}\" = set"; }; then : - $as_echo_n "(cached) " >&6 -else - cat confdefs.h - <<_ACEOF >conftest.$ac_ext -/* end confdefs.h. */ -$4 -#include <$2> -_ACEOF -if ac_fn_c_try_compile "$LINENO"; then : - eval "$3=yes" -else - eval "$3=no" -fi -rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext -fi -eval ac_res=\$$3 - { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_res" >&5 -$as_echo "$ac_res" >&6; } - eval $as_lineno_stack; test "x$as_lineno_stack" = x && { as_lineno=; unset as_lineno;} - -} # ac_fn_c_check_header_compile - -# ac_fn_c_check_type LINENO TYPE VAR INCLUDES -# ------------------------------------------- -# Tests whether TYPE exists after having included INCLUDES, setting cache -# variable VAR accordingly. -ac_fn_c_check_type () -{ - as_lineno=${as_lineno-"$1"} as_lineno_stack=as_lineno_stack=$as_lineno_stack - { $as_echo "$as_me:${as_lineno-$LINENO}: checking for $2" >&5 -$as_echo_n "checking for $2... " >&6; } -if { as_var=$3; eval "test \"\${$as_var+set}\" = set"; }; then : - $as_echo_n "(cached) " >&6 -else - eval "$3=no" - cat confdefs.h - <<_ACEOF >conftest.$ac_ext -/* end confdefs.h. */ -$4 -int -main () -{ -if (sizeof ($2)) - return 0; - ; - return 0; -} -_ACEOF -if ac_fn_c_try_compile "$LINENO"; then : - cat confdefs.h - <<_ACEOF >conftest.$ac_ext -/* end confdefs.h. */ -$4 -int -main () -{ -if (sizeof (($2))) - return 0; - ; - return 0; -} -_ACEOF -if ac_fn_c_try_compile "$LINENO"; then : - -else - eval "$3=yes" -fi -rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext -fi -rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext -fi -eval ac_res=\$$3 - { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_res" >&5 -$as_echo "$ac_res" >&6; } - eval $as_lineno_stack; test "x$as_lineno_stack" = x && { as_lineno=; unset as_lineno;} - -} # ac_fn_c_check_type -cat >config.log <<_ACEOF -This file contains any messages produced by compilers while -running configure, to aid debugging if configure makes a mistake. - -It was created by libipfix $as_me 0.8.2 , which was -generated by GNU Autoconf 2.65. Invocation command line was - - $ $0 $@ - -_ACEOF -exec 5>>config.log -{ -cat <<_ASUNAME -## --------- ## -## Platform. 
## -## --------- ## - -hostname = `(hostname || uname -n) 2>/dev/null | sed 1q` -uname -m = `(uname -m) 2>/dev/null || echo unknown` -uname -r = `(uname -r) 2>/dev/null || echo unknown` -uname -s = `(uname -s) 2>/dev/null || echo unknown` -uname -v = `(uname -v) 2>/dev/null || echo unknown` - -/usr/bin/uname -p = `(/usr/bin/uname -p) 2>/dev/null || echo unknown` -/bin/uname -X = `(/bin/uname -X) 2>/dev/null || echo unknown` +/usr/bin/uname -p = `(/usr/bin/uname -p) 2>/dev/null || echo unknown` +/bin/uname -X = `(/bin/uname -X) 2>/dev/null || echo unknown` /bin/arch = `(/bin/arch) 2>/dev/null || echo unknown` /usr/bin/arch -k = `(/usr/bin/arch -k) 2>/dev/null || echo unknown` @@ -1844,8 +1497,8 @@ for as_dir in $PATH do IFS=$as_save_IFS test -z "$as_dir" && as_dir=. - $as_echo "PATH: $as_dir" - done + $as_echo "PATH: $as_dir" +done IFS=$as_save_IFS } >&5 @@ -1882,9 +1535,9 @@ do ac_arg=`$as_echo "$ac_arg" | sed "s/'/'\\\\\\\\''/g"` ;; esac case $ac_pass in - 1) as_fn_append ac_configure_args0 " '$ac_arg'" ;; + 1) ac_configure_args0="$ac_configure_args0 '$ac_arg'" ;; 2) - as_fn_append ac_configure_args1 " '$ac_arg'" + ac_configure_args1="$ac_configure_args1 '$ac_arg'" if test $ac_must_keep_next = true; then ac_must_keep_next=false # Got value, back to normal. else @@ -1900,13 +1553,13 @@ do -* ) ac_must_keep_next=true ;; esac fi - as_fn_append ac_configure_args " '$ac_arg'" + ac_configure_args="$ac_configure_args '$ac_arg'" ;; esac done done -{ ac_configure_args0=; unset ac_configure_args0;} -{ ac_configure_args1=; unset ac_configure_args1;} +$as_unset ac_configure_args0 || test "${ac_configure_args0+set}" != set || { ac_configure_args0=; export ac_configure_args0; } +$as_unset ac_configure_args1 || test "${ac_configure_args1+set}" != set || { ac_configure_args1=; export ac_configure_args1; } # When interrupted or exit'd, cleanup temporary files, and complete # config.log. We remove comments because anyway the quotes in there @@ -1931,13 +1584,13 @@ _ASBOX case $ac_val in #( *${as_nl}*) case $ac_var in #( - *_cv_*) { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: cache variable $ac_var contains a newline" >&5 + *_cv_*) { $as_echo "$as_me:$LINENO: WARNING: cache variable $ac_var contains a newline" >&5 $as_echo "$as_me: WARNING: cache variable $ac_var contains a newline" >&2;} ;; esac case $ac_var in #( _ | IFS | as_nl) ;; #( BASH_ARGV | BASH_SOURCE) eval $ac_var= ;; #( - *) { eval $ac_var=; unset $ac_var;} ;; + *) $as_unset $ac_var ;; esac ;; esac done @@ -2009,39 +1662,37 @@ _ASBOX exit $exit_status ' 0 for ac_signal in 1 2 13 15; do - trap 'ac_signal='$ac_signal'; as_fn_exit 1' $ac_signal + trap 'ac_signal='$ac_signal'; { (exit 1); exit 1; }' $ac_signal done ac_signal=0 # confdefs.h avoids OS command line length limits that DEFS can exceed. rm -f -r conftest* confdefs.h -$as_echo "/* confdefs.h */" > confdefs.h - # Predefined preprocessor variables. 
cat >>confdefs.h <<_ACEOF #define PACKAGE_NAME "$PACKAGE_NAME" _ACEOF + cat >>confdefs.h <<_ACEOF #define PACKAGE_TARNAME "$PACKAGE_TARNAME" _ACEOF + cat >>confdefs.h <<_ACEOF #define PACKAGE_VERSION "$PACKAGE_VERSION" _ACEOF + cat >>confdefs.h <<_ACEOF #define PACKAGE_STRING "$PACKAGE_STRING" _ACEOF -cat >>confdefs.h <<_ACEOF -#define PACKAGE_BUGREPORT "$PACKAGE_BUGREPORT" -_ACEOF cat >>confdefs.h <<_ACEOF -#define PACKAGE_URL "$PACKAGE_URL" +#define PACKAGE_BUGREPORT "$PACKAGE_BUGREPORT" _ACEOF @@ -2061,8 +1712,8 @@ fi for ac_site_file in "$ac_site_file1" "$ac_site_file2" do test "x$ac_site_file" = xNONE && continue - if test /dev/null != "$ac_site_file" && test -r "$ac_site_file"; then - { $as_echo "$as_me:${as_lineno-$LINENO}: loading site script $ac_site_file" >&5 + if test -r "$ac_site_file"; then + { $as_echo "$as_me:$LINENO: loading site script $ac_site_file" >&5 $as_echo "$as_me: loading site script $ac_site_file" >&6;} sed 's/^/| /' "$ac_site_file" >&5 . "$ac_site_file" @@ -2070,10 +1721,10 @@ $as_echo "$as_me: loading site script $ac_site_file" >&6;} done if test -r "$cache_file"; then - # Some versions of bash will fail to source /dev/null (special files - # actually), so we avoid doing that. DJGPP emulates it as a regular file. - if test /dev/null != "$cache_file" && test -f "$cache_file"; then - { $as_echo "$as_me:${as_lineno-$LINENO}: loading cache $cache_file" >&5 + # Some versions of bash will fail to source /dev/null (special + # files actually), so we avoid doing that. + if test -f "$cache_file"; then + { $as_echo "$as_me:$LINENO: loading cache $cache_file" >&5 $as_echo "$as_me: loading cache $cache_file" >&6;} case $cache_file in [\\/]* | ?:[\\/]* ) . "$cache_file";; @@ -2081,7 +1732,7 @@ $as_echo "$as_me: loading cache $cache_file" >&6;} esac fi else - { $as_echo "$as_me:${as_lineno-$LINENO}: creating cache $cache_file" >&5 + { $as_echo "$as_me:$LINENO: creating cache $cache_file" >&5 $as_echo "$as_me: creating cache $cache_file" >&6;} >$cache_file fi @@ -2096,11 +1747,11 @@ for ac_var in $ac_precious_vars; do eval ac_new_val=\$ac_env_${ac_var}_value case $ac_old_set,$ac_new_set in set,) - { $as_echo "$as_me:${as_lineno-$LINENO}: error: \`$ac_var' was set to \`$ac_old_val' in the previous run" >&5 + { $as_echo "$as_me:$LINENO: error: \`$ac_var' was set to \`$ac_old_val' in the previous run" >&5 $as_echo "$as_me: error: \`$ac_var' was set to \`$ac_old_val' in the previous run" >&2;} ac_cache_corrupted=: ;; ,set) - { $as_echo "$as_me:${as_lineno-$LINENO}: error: \`$ac_var' was not set in the previous run" >&5 + { $as_echo "$as_me:$LINENO: error: \`$ac_var' was not set in the previous run" >&5 $as_echo "$as_me: error: \`$ac_var' was not set in the previous run" >&2;} ac_cache_corrupted=: ;; ,);; @@ -2110,17 +1761,17 @@ $as_echo "$as_me: error: \`$ac_var' was not set in the previous run" >&2;} ac_old_val_w=`echo x $ac_old_val` ac_new_val_w=`echo x $ac_new_val` if test "$ac_old_val_w" != "$ac_new_val_w"; then - { $as_echo "$as_me:${as_lineno-$LINENO}: error: \`$ac_var' has changed since the previous run:" >&5 + { $as_echo "$as_me:$LINENO: error: \`$ac_var' has changed since the previous run:" >&5 $as_echo "$as_me: error: \`$ac_var' has changed since the previous run:" >&2;} ac_cache_corrupted=: else - { $as_echo "$as_me:${as_lineno-$LINENO}: warning: ignoring whitespace changes in \`$ac_var' since the previous run:" >&5 + { $as_echo "$as_me:$LINENO: warning: ignoring whitespace changes in \`$ac_var' since the previous run:" >&5 $as_echo "$as_me: warning: ignoring whitespace 
changes in \`$ac_var' since the previous run:" >&2;} eval $ac_var=\$ac_old_val fi - { $as_echo "$as_me:${as_lineno-$LINENO}: former value: \`$ac_old_val'" >&5 + { $as_echo "$as_me:$LINENO: former value: \`$ac_old_val'" >&5 $as_echo "$as_me: former value: \`$ac_old_val'" >&2;} - { $as_echo "$as_me:${as_lineno-$LINENO}: current value: \`$ac_new_val'" >&5 + { $as_echo "$as_me:$LINENO: current value: \`$ac_new_val'" >&5 $as_echo "$as_me: current value: \`$ac_new_val'" >&2;} fi;; esac @@ -2132,20 +1783,43 @@ $as_echo "$as_me: current value: \`$ac_new_val'" >&2;} esac case " $ac_configure_args " in *" '$ac_arg' "*) ;; # Avoid dups. Use of quotes ensures accuracy. - *) as_fn_append ac_configure_args " '$ac_arg'" ;; + *) ac_configure_args="$ac_configure_args '$ac_arg'" ;; esac fi done if $ac_cache_corrupted; then - { $as_echo "$as_me:${as_lineno-$LINENO}: error: in \`$ac_pwd':" >&5 + { $as_echo "$as_me:$LINENO: error: in \`$ac_pwd':" >&5 $as_echo "$as_me: error: in \`$ac_pwd':" >&2;} - { $as_echo "$as_me:${as_lineno-$LINENO}: error: changes in the environment can compromise the build" >&5 + { $as_echo "$as_me:$LINENO: error: changes in the environment can compromise the build" >&5 $as_echo "$as_me: error: changes in the environment can compromise the build" >&2;} - as_fn_error "run \`make distclean' and/or \`rm $cache_file' and start over" "$LINENO" 5 + { { $as_echo "$as_me:$LINENO: error: run \`make distclean' and/or \`rm $cache_file' and start over" >&5 +$as_echo "$as_me: error: run \`make distclean' and/or \`rm $cache_file' and start over" >&2;} + { (exit 1); exit 1; }; } fi -## -------------------- ## -## Main body of script. ## -## -------------------- ## + + + + + + + + + + + + + + + + + + + + + + + + ac_ext=c ac_cpp='$CPP $CPPFLAGS' @@ -2157,16 +1831,24 @@ ac_compiler_gnu=$ac_cv_c_compiler_gnu ac_aux_dir= for ac_dir in config "$srcdir"/config; do - for ac_t in install-sh install.sh shtool; do - if test -f "$ac_dir/$ac_t"; then - ac_aux_dir=$ac_dir - ac_install_sh="$ac_aux_dir/$ac_t -c" - break 2 - fi - done + if test -f "$ac_dir/install-sh"; then + ac_aux_dir=$ac_dir + ac_install_sh="$ac_aux_dir/install-sh -c" + break + elif test -f "$ac_dir/install.sh"; then + ac_aux_dir=$ac_dir + ac_install_sh="$ac_aux_dir/install.sh -c" + break + elif test -f "$ac_dir/shtool"; then + ac_aux_dir=$ac_dir + ac_install_sh="$ac_aux_dir/shtool install -c" + break + fi done if test -z "$ac_aux_dir"; then - as_fn_error "cannot find install-sh, install.sh, or shtool in config \"$srcdir\"/config" "$LINENO" 5 + { { $as_echo "$as_me:$LINENO: error: cannot find install-sh or install.sh in config \"$srcdir\"/config" >&5 +$as_echo "$as_me: error: cannot find install-sh or install.sh in config \"$srcdir\"/config" >&2;} + { (exit 1); exit 1; }; } fi # These three variables are undocumented and unsupported, @@ -2183,27 +1865,35 @@ ac_config_headers="$ac_config_headers config.h" # Make sure we can run config.sub. $SHELL "$ac_aux_dir/config.sub" sun4 >/dev/null 2>&1 || - as_fn_error "cannot run $SHELL $ac_aux_dir/config.sub" "$LINENO" 5 + { { $as_echo "$as_me:$LINENO: error: cannot run $SHELL $ac_aux_dir/config.sub" >&5 +$as_echo "$as_me: error: cannot run $SHELL $ac_aux_dir/config.sub" >&2;} + { (exit 1); exit 1; }; } -{ $as_echo "$as_me:${as_lineno-$LINENO}: checking build system type" >&5 +{ $as_echo "$as_me:$LINENO: checking build system type" >&5 $as_echo_n "checking build system type... 
" >&6; } -if test "${ac_cv_build+set}" = set; then : +if test "${ac_cv_build+set}" = set; then $as_echo_n "(cached) " >&6 else ac_build_alias=$build_alias test "x$ac_build_alias" = x && ac_build_alias=`$SHELL "$ac_aux_dir/config.guess"` test "x$ac_build_alias" = x && - as_fn_error "cannot guess build type; you must specify one" "$LINENO" 5 + { { $as_echo "$as_me:$LINENO: error: cannot guess build type; you must specify one" >&5 +$as_echo "$as_me: error: cannot guess build type; you must specify one" >&2;} + { (exit 1); exit 1; }; } ac_cv_build=`$SHELL "$ac_aux_dir/config.sub" $ac_build_alias` || - as_fn_error "$SHELL $ac_aux_dir/config.sub $ac_build_alias failed" "$LINENO" 5 + { { $as_echo "$as_me:$LINENO: error: $SHELL $ac_aux_dir/config.sub $ac_build_alias failed" >&5 +$as_echo "$as_me: error: $SHELL $ac_aux_dir/config.sub $ac_build_alias failed" >&2;} + { (exit 1); exit 1; }; } fi -{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_build" >&5 +{ $as_echo "$as_me:$LINENO: result: $ac_cv_build" >&5 $as_echo "$ac_cv_build" >&6; } case $ac_cv_build in *-*-*) ;; -*) as_fn_error "invalid value of canonical build" "$LINENO" 5;; +*) { { $as_echo "$as_me:$LINENO: error: invalid value of canonical build" >&5 +$as_echo "$as_me: error: invalid value of canonical build" >&2;} + { (exit 1); exit 1; }; };; esac build=$ac_cv_build ac_save_IFS=$IFS; IFS='-' @@ -2219,24 +1909,28 @@ IFS=$ac_save_IFS case $build_os in *\ *) build_os=`echo "$build_os" | sed 's/ /-/g'`;; esac -{ $as_echo "$as_me:${as_lineno-$LINENO}: checking host system type" >&5 +{ $as_echo "$as_me:$LINENO: checking host system type" >&5 $as_echo_n "checking host system type... " >&6; } -if test "${ac_cv_host+set}" = set; then : +if test "${ac_cv_host+set}" = set; then $as_echo_n "(cached) " >&6 else if test "x$host_alias" = x; then ac_cv_host=$ac_cv_build else ac_cv_host=`$SHELL "$ac_aux_dir/config.sub" $host_alias` || - as_fn_error "$SHELL $ac_aux_dir/config.sub $host_alias failed" "$LINENO" 5 + { { $as_echo "$as_me:$LINENO: error: $SHELL $ac_aux_dir/config.sub $host_alias failed" >&5 +$as_echo "$as_me: error: $SHELL $ac_aux_dir/config.sub $host_alias failed" >&2;} + { (exit 1); exit 1; }; } fi fi -{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_host" >&5 +{ $as_echo "$as_me:$LINENO: result: $ac_cv_host" >&5 $as_echo "$ac_cv_host" >&6; } case $ac_cv_host in *-*-*) ;; -*) as_fn_error "invalid value of canonical host" "$LINENO" 5;; +*) { { $as_echo "$as_me:$LINENO: error: invalid value of canonical host" >&5 +$as_echo "$as_me: error: invalid value of canonical host" >&2;} + { (exit 1); exit 1; }; };; esac host=$ac_cv_host ac_save_IFS=$IFS; IFS='-' @@ -2252,24 +1946,28 @@ IFS=$ac_save_IFS case $host_os in *\ *) host_os=`echo "$host_os" | sed 's/ /-/g'`;; esac -{ $as_echo "$as_me:${as_lineno-$LINENO}: checking target system type" >&5 +{ $as_echo "$as_me:$LINENO: checking target system type" >&5 $as_echo_n "checking target system type... 
" >&6; } -if test "${ac_cv_target+set}" = set; then : +if test "${ac_cv_target+set}" = set; then $as_echo_n "(cached) " >&6 else if test "x$target_alias" = x; then ac_cv_target=$ac_cv_host else ac_cv_target=`$SHELL "$ac_aux_dir/config.sub" $target_alias` || - as_fn_error "$SHELL $ac_aux_dir/config.sub $target_alias failed" "$LINENO" 5 + { { $as_echo "$as_me:$LINENO: error: $SHELL $ac_aux_dir/config.sub $target_alias failed" >&5 +$as_echo "$as_me: error: $SHELL $ac_aux_dir/config.sub $target_alias failed" >&2;} + { (exit 1); exit 1; }; } fi fi -{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_target" >&5 +{ $as_echo "$as_me:$LINENO: result: $ac_cv_target" >&5 $as_echo "$ac_cv_target" >&6; } case $ac_cv_target in *-*-*) ;; -*) as_fn_error "invalid value of canonical target" "$LINENO" 5;; +*) { { $as_echo "$as_me:$LINENO: error: invalid value of canonical target" >&5 +$as_echo "$as_me: error: invalid value of canonical target" >&2;} + { (exit 1); exit 1; }; };; esac target=$ac_cv_target ac_save_IFS=$IFS; IFS='-' @@ -2301,9 +1999,9 @@ ac_compiler_gnu=$ac_cv_c_compiler_gnu if test -n "$ac_tool_prefix"; then # Extract the first word of "${ac_tool_prefix}gcc", so it can be a program name with args. set dummy ${ac_tool_prefix}gcc; ac_word=$2 -{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 +{ $as_echo "$as_me:$LINENO: checking for $ac_word" >&5 $as_echo_n "checking for $ac_word... " >&6; } -if test "${ac_cv_prog_CC+set}" = set; then : +if test "${ac_cv_prog_CC+set}" = set; then $as_echo_n "(cached) " >&6 else if test -n "$CC"; then @@ -2314,24 +2012,24 @@ for as_dir in $PATH do IFS=$as_save_IFS test -z "$as_dir" && as_dir=. - for ac_exec_ext in '' $ac_executable_extensions; do + for ac_exec_ext in '' $ac_executable_extensions; do if { test -f "$as_dir/$ac_word$ac_exec_ext" && $as_test_x "$as_dir/$ac_word$ac_exec_ext"; }; then ac_cv_prog_CC="${ac_tool_prefix}gcc" - $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 + $as_echo "$as_me:$LINENO: found $as_dir/$ac_word$ac_exec_ext" >&5 break 2 fi done - done +done IFS=$as_save_IFS fi fi CC=$ac_cv_prog_CC if test -n "$CC"; then - { $as_echo "$as_me:${as_lineno-$LINENO}: result: $CC" >&5 + { $as_echo "$as_me:$LINENO: result: $CC" >&5 $as_echo "$CC" >&6; } else - { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 + { $as_echo "$as_me:$LINENO: result: no" >&5 $as_echo "no" >&6; } fi @@ -2341,9 +2039,9 @@ if test -z "$ac_cv_prog_CC"; then ac_ct_CC=$CC # Extract the first word of "gcc", so it can be a program name with args. set dummy gcc; ac_word=$2 -{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 +{ $as_echo "$as_me:$LINENO: checking for $ac_word" >&5 $as_echo_n "checking for $ac_word... " >&6; } -if test "${ac_cv_prog_ac_ct_CC+set}" = set; then : +if test "${ac_cv_prog_ac_ct_CC+set}" = set; then $as_echo_n "(cached) " >&6 else if test -n "$ac_ct_CC"; then @@ -2354,24 +2052,24 @@ for as_dir in $PATH do IFS=$as_save_IFS test -z "$as_dir" && as_dir=. 
- for ac_exec_ext in '' $ac_executable_extensions; do + for ac_exec_ext in '' $ac_executable_extensions; do if { test -f "$as_dir/$ac_word$ac_exec_ext" && $as_test_x "$as_dir/$ac_word$ac_exec_ext"; }; then ac_cv_prog_ac_ct_CC="gcc" - $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 + $as_echo "$as_me:$LINENO: found $as_dir/$ac_word$ac_exec_ext" >&5 break 2 fi done - done +done IFS=$as_save_IFS fi fi ac_ct_CC=$ac_cv_prog_ac_ct_CC if test -n "$ac_ct_CC"; then - { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_ct_CC" >&5 + { $as_echo "$as_me:$LINENO: result: $ac_ct_CC" >&5 $as_echo "$ac_ct_CC" >&6; } else - { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 + { $as_echo "$as_me:$LINENO: result: no" >&5 $as_echo "no" >&6; } fi @@ -2380,7 +2078,7 @@ fi else case $cross_compiling:$ac_tool_warned in yes:) -{ $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: using cross tools not prefixed with host triplet" >&5 +{ $as_echo "$as_me:$LINENO: WARNING: using cross tools not prefixed with host triplet" >&5 $as_echo "$as_me: WARNING: using cross tools not prefixed with host triplet" >&2;} ac_tool_warned=yes ;; esac @@ -2394,9 +2092,9 @@ if test -z "$CC"; then if test -n "$ac_tool_prefix"; then # Extract the first word of "${ac_tool_prefix}cc", so it can be a program name with args. set dummy ${ac_tool_prefix}cc; ac_word=$2 -{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 +{ $as_echo "$as_me:$LINENO: checking for $ac_word" >&5 $as_echo_n "checking for $ac_word... " >&6; } -if test "${ac_cv_prog_CC+set}" = set; then : +if test "${ac_cv_prog_CC+set}" = set; then $as_echo_n "(cached) " >&6 else if test -n "$CC"; then @@ -2407,24 +2105,24 @@ for as_dir in $PATH do IFS=$as_save_IFS test -z "$as_dir" && as_dir=. - for ac_exec_ext in '' $ac_executable_extensions; do + for ac_exec_ext in '' $ac_executable_extensions; do if { test -f "$as_dir/$ac_word$ac_exec_ext" && $as_test_x "$as_dir/$ac_word$ac_exec_ext"; }; then ac_cv_prog_CC="${ac_tool_prefix}cc" - $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 + $as_echo "$as_me:$LINENO: found $as_dir/$ac_word$ac_exec_ext" >&5 break 2 fi done - done +done IFS=$as_save_IFS fi fi CC=$ac_cv_prog_CC if test -n "$CC"; then - { $as_echo "$as_me:${as_lineno-$LINENO}: result: $CC" >&5 + { $as_echo "$as_me:$LINENO: result: $CC" >&5 $as_echo "$CC" >&6; } else - { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 + { $as_echo "$as_me:$LINENO: result: no" >&5 $as_echo "no" >&6; } fi @@ -2434,9 +2132,9 @@ fi if test -z "$CC"; then # Extract the first word of "cc", so it can be a program name with args. set dummy cc; ac_word=$2 -{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 +{ $as_echo "$as_me:$LINENO: checking for $ac_word" >&5 $as_echo_n "checking for $ac_word... " >&6; } -if test "${ac_cv_prog_CC+set}" = set; then : +if test "${ac_cv_prog_CC+set}" = set; then $as_echo_n "(cached) " >&6 else if test -n "$CC"; then @@ -2448,18 +2146,18 @@ for as_dir in $PATH do IFS=$as_save_IFS test -z "$as_dir" && as_dir=. 
- for ac_exec_ext in '' $ac_executable_extensions; do + for ac_exec_ext in '' $ac_executable_extensions; do if { test -f "$as_dir/$ac_word$ac_exec_ext" && $as_test_x "$as_dir/$ac_word$ac_exec_ext"; }; then if test "$as_dir/$ac_word$ac_exec_ext" = "/usr/ucb/cc"; then ac_prog_rejected=yes continue fi ac_cv_prog_CC="cc" - $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 + $as_echo "$as_me:$LINENO: found $as_dir/$ac_word$ac_exec_ext" >&5 break 2 fi done - done +done IFS=$as_save_IFS if test $ac_prog_rejected = yes; then @@ -2478,10 +2176,10 @@ fi fi CC=$ac_cv_prog_CC if test -n "$CC"; then - { $as_echo "$as_me:${as_lineno-$LINENO}: result: $CC" >&5 + { $as_echo "$as_me:$LINENO: result: $CC" >&5 $as_echo "$CC" >&6; } else - { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 + { $as_echo "$as_me:$LINENO: result: no" >&5 $as_echo "no" >&6; } fi @@ -2493,9 +2191,9 @@ if test -z "$CC"; then do # Extract the first word of "$ac_tool_prefix$ac_prog", so it can be a program name with args. set dummy $ac_tool_prefix$ac_prog; ac_word=$2 -{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 +{ $as_echo "$as_me:$LINENO: checking for $ac_word" >&5 $as_echo_n "checking for $ac_word... " >&6; } -if test "${ac_cv_prog_CC+set}" = set; then : +if test "${ac_cv_prog_CC+set}" = set; then $as_echo_n "(cached) " >&6 else if test -n "$CC"; then @@ -2506,24 +2204,24 @@ for as_dir in $PATH do IFS=$as_save_IFS test -z "$as_dir" && as_dir=. - for ac_exec_ext in '' $ac_executable_extensions; do + for ac_exec_ext in '' $ac_executable_extensions; do if { test -f "$as_dir/$ac_word$ac_exec_ext" && $as_test_x "$as_dir/$ac_word$ac_exec_ext"; }; then ac_cv_prog_CC="$ac_tool_prefix$ac_prog" - $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 + $as_echo "$as_me:$LINENO: found $as_dir/$ac_word$ac_exec_ext" >&5 break 2 fi done - done +done IFS=$as_save_IFS fi fi CC=$ac_cv_prog_CC if test -n "$CC"; then - { $as_echo "$as_me:${as_lineno-$LINENO}: result: $CC" >&5 + { $as_echo "$as_me:$LINENO: result: $CC" >&5 $as_echo "$CC" >&6; } else - { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 + { $as_echo "$as_me:$LINENO: result: no" >&5 $as_echo "no" >&6; } fi @@ -2537,9 +2235,9 @@ if test -z "$CC"; then do # Extract the first word of "$ac_prog", so it can be a program name with args. set dummy $ac_prog; ac_word=$2 -{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 +{ $as_echo "$as_me:$LINENO: checking for $ac_word" >&5 $as_echo_n "checking for $ac_word... " >&6; } -if test "${ac_cv_prog_ac_ct_CC+set}" = set; then : +if test "${ac_cv_prog_ac_ct_CC+set}" = set; then $as_echo_n "(cached) " >&6 else if test -n "$ac_ct_CC"; then @@ -2550,24 +2248,24 @@ for as_dir in $PATH do IFS=$as_save_IFS test -z "$as_dir" && as_dir=. 
- for ac_exec_ext in '' $ac_executable_extensions; do + for ac_exec_ext in '' $ac_executable_extensions; do if { test -f "$as_dir/$ac_word$ac_exec_ext" && $as_test_x "$as_dir/$ac_word$ac_exec_ext"; }; then ac_cv_prog_ac_ct_CC="$ac_prog" - $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 + $as_echo "$as_me:$LINENO: found $as_dir/$ac_word$ac_exec_ext" >&5 break 2 fi done - done +done IFS=$as_save_IFS fi fi ac_ct_CC=$ac_cv_prog_ac_ct_CC if test -n "$ac_ct_CC"; then - { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_ct_CC" >&5 + { $as_echo "$as_me:$LINENO: result: $ac_ct_CC" >&5 $as_echo "$ac_ct_CC" >&6; } else - { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 + { $as_echo "$as_me:$LINENO: result: no" >&5 $as_echo "no" >&6; } fi @@ -2580,7 +2278,7 @@ done else case $cross_compiling:$ac_tool_warned in yes:) -{ $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: using cross tools not prefixed with host triplet" >&5 +{ $as_echo "$as_me:$LINENO: WARNING: using cross tools not prefixed with host triplet" >&5 $as_echo "$as_me: WARNING: using cross tools not prefixed with host triplet" >&2;} ac_tool_warned=yes ;; esac @@ -2591,37 +2289,57 @@ fi fi -test -z "$CC" && { { $as_echo "$as_me:${as_lineno-$LINENO}: error: in \`$ac_pwd':" >&5 +test -z "$CC" && { { $as_echo "$as_me:$LINENO: error: in \`$ac_pwd':" >&5 $as_echo "$as_me: error: in \`$ac_pwd':" >&2;} -as_fn_error "no acceptable C compiler found in \$PATH -See \`config.log' for more details." "$LINENO" 5; } +{ { $as_echo "$as_me:$LINENO: error: no acceptable C compiler found in \$PATH +See \`config.log' for more details." >&5 +$as_echo "$as_me: error: no acceptable C compiler found in \$PATH +See \`config.log' for more details." >&2;} + { (exit 1); exit 1; }; }; } # Provide some information about the compiler. -$as_echo "$as_me:${as_lineno-$LINENO}: checking for C compiler version" >&5 +$as_echo "$as_me:$LINENO: checking for C compiler version" >&5 set X $ac_compile ac_compiler=$2 -for ac_option in --version -v -V -qversion; do - { { ac_try="$ac_compiler $ac_option >&5" +{ (ac_try="$ac_compiler --version >&5" case "(($ac_try" in *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; *) ac_try_echo=$ac_try;; esac -eval ac_try_echo="\"\$as_me:${as_lineno-$LINENO}: $ac_try_echo\"" -$as_echo "$ac_try_echo"; } >&5 - (eval "$ac_compiler $ac_option >&5") 2>conftest.err +eval ac_try_echo="\"\$as_me:$LINENO: $ac_try_echo\"" +$as_echo "$ac_try_echo") >&5 + (eval "$ac_compiler --version >&5") 2>&5 ac_status=$? - if test -s conftest.err; then - sed '10a\ -... rest of stderr output deleted ... - 10q' conftest.err >conftest.er1 - cat conftest.er1 >&5 - fi - rm -f conftest.er1 conftest.err - $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 - test $ac_status = 0; } -done + $as_echo "$as_me:$LINENO: \$? = $ac_status" >&5 + (exit $ac_status); } +{ (ac_try="$ac_compiler -v >&5" +case "(($ac_try" in + *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; + *) ac_try_echo=$ac_try;; +esac +eval ac_try_echo="\"\$as_me:$LINENO: $ac_try_echo\"" +$as_echo "$ac_try_echo") >&5 + (eval "$ac_compiler -v >&5") 2>&5 + ac_status=$? + $as_echo "$as_me:$LINENO: \$? = $ac_status" >&5 + (exit $ac_status); } +{ (ac_try="$ac_compiler -V >&5" +case "(($ac_try" in + *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; + *) ac_try_echo=$ac_try;; +esac +eval ac_try_echo="\"\$as_me:$LINENO: $ac_try_echo\"" +$as_echo "$ac_try_echo") >&5 + (eval "$ac_compiler -V >&5") 2>&5 + ac_status=$? + $as_echo "$as_me:$LINENO: \$? 
= $ac_status" >&5 + (exit $ac_status); } -cat confdefs.h - <<_ACEOF >conftest.$ac_ext +cat >conftest.$ac_ext <<_ACEOF +/* confdefs.h. */ +_ACEOF +cat confdefs.h >>conftest.$ac_ext +cat >>conftest.$ac_ext <<_ACEOF /* end confdefs.h. */ int @@ -2637,8 +2355,8 @@ ac_clean_files="$ac_clean_files a.out a.out.dSYM a.exe b.out" # Try to create an executable without -o first, disregard a.out. # It will help us diagnose broken compilers, and finding out an intuition # of exeext. -{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether the C compiler works" >&5 -$as_echo_n "checking whether the C compiler works... " >&6; } +{ $as_echo "$as_me:$LINENO: checking for C compiler default output file name" >&5 +$as_echo_n "checking for C compiler default output file name... " >&6; } ac_link_default=`$as_echo "$ac_link" | sed 's/ -o *conftest[^ ]*//'` # The possible output files: @@ -2654,17 +2372,17 @@ do done rm -f $ac_rmfiles -if { { ac_try="$ac_link_default" +if { (ac_try="$ac_link_default" case "(($ac_try" in *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; *) ac_try_echo=$ac_try;; esac -eval ac_try_echo="\"\$as_me:${as_lineno-$LINENO}: $ac_try_echo\"" -$as_echo "$ac_try_echo"; } >&5 +eval ac_try_echo="\"\$as_me:$LINENO: $ac_try_echo\"" +$as_echo "$ac_try_echo") >&5 (eval "$ac_link_default") 2>&5 ac_status=$? - $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 - test $ac_status = 0; }; then : + $as_echo "$as_me:$LINENO: \$? = $ac_status" >&5 + (exit $ac_status); }; then # Autoconf-2.13 could set the ac_cv_exeext variable to `no'. # So ignore a value of `no', otherwise this would lead to `EXEEXT = no' # in a Makefile. We should not override ac_cv_exeext if it was cached, @@ -2681,7 +2399,7 @@ do # certainly right. break;; *.* ) - if test "${ac_cv_exeext+set}" = set && test "$ac_cv_exeext" != no; + if test "${ac_cv_exeext+set}" = set && test "$ac_cv_exeext" != no; then :; else ac_cv_exeext=`expr "$ac_file" : '[^.]*\(\..*\)'` fi @@ -2700,42 +2418,84 @@ test "$ac_cv_exeext" = no && ac_cv_exeext= else ac_file='' fi -if test -z "$ac_file"; then : - { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 -$as_echo "no" >&6; } -$as_echo "$as_me: failed program was:" >&5 + +{ $as_echo "$as_me:$LINENO: result: $ac_file" >&5 +$as_echo "$ac_file" >&6; } +if test -z "$ac_file"; then + $as_echo "$as_me: failed program was:" >&5 sed 's/^/| /' conftest.$ac_ext >&5 -{ { $as_echo "$as_me:${as_lineno-$LINENO}: error: in \`$ac_pwd':" >&5 +{ { $as_echo "$as_me:$LINENO: error: in \`$ac_pwd':" >&5 $as_echo "$as_me: error: in \`$ac_pwd':" >&2;} -{ as_fn_set_status 77 -as_fn_error "C compiler cannot create executables -See \`config.log' for more details." "$LINENO" 5; }; } -else - { $as_echo "$as_me:${as_lineno-$LINENO}: result: yes" >&5 -$as_echo "yes" >&6; } +{ { $as_echo "$as_me:$LINENO: error: C compiler cannot create executables +See \`config.log' for more details." >&5 +$as_echo "$as_me: error: C compiler cannot create executables +See \`config.log' for more details." >&2;} + { (exit 77); exit 77; }; }; } fi -{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for C compiler default output file name" >&5 -$as_echo_n "checking for C compiler default output file name... " >&6; } -{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_file" >&5 -$as_echo "$ac_file" >&6; } + ac_exeext=$ac_cv_exeext +# Check that the compiler produces executables we can run. If not, either +# the compiler is broken, or we cross compile. 
+{ $as_echo "$as_me:$LINENO: checking whether the C compiler works" >&5 +$as_echo_n "checking whether the C compiler works... " >&6; } +# FIXME: These cross compiler hacks should be removed for Autoconf 3.0 +# If not cross compiling, check that we can run a simple program. +if test "$cross_compiling" != yes; then + if { ac_try='./$ac_file' + { (case "(($ac_try" in + *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; + *) ac_try_echo=$ac_try;; +esac +eval ac_try_echo="\"\$as_me:$LINENO: $ac_try_echo\"" +$as_echo "$ac_try_echo") >&5 + (eval "$ac_try") 2>&5 + ac_status=$? + $as_echo "$as_me:$LINENO: \$? = $ac_status" >&5 + (exit $ac_status); }; }; then + cross_compiling=no + else + if test "$cross_compiling" = maybe; then + cross_compiling=yes + else + { { $as_echo "$as_me:$LINENO: error: in \`$ac_pwd':" >&5 +$as_echo "$as_me: error: in \`$ac_pwd':" >&2;} +{ { $as_echo "$as_me:$LINENO: error: cannot run C compiled programs. +If you meant to cross compile, use \`--host'. +See \`config.log' for more details." >&5 +$as_echo "$as_me: error: cannot run C compiled programs. +If you meant to cross compile, use \`--host'. +See \`config.log' for more details." >&2;} + { (exit 1); exit 1; }; }; } + fi + fi +fi +{ $as_echo "$as_me:$LINENO: result: yes" >&5 +$as_echo "yes" >&6; } + rm -f -r a.out a.out.dSYM a.exe conftest$ac_cv_exeext b.out ac_clean_files=$ac_clean_files_save -{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for suffix of executables" >&5 +# Check that the compiler produces executables we can run. If not, either +# the compiler is broken, or we cross compile. +{ $as_echo "$as_me:$LINENO: checking whether we are cross compiling" >&5 +$as_echo_n "checking whether we are cross compiling... " >&6; } +{ $as_echo "$as_me:$LINENO: result: $cross_compiling" >&5 +$as_echo "$cross_compiling" >&6; } + +{ $as_echo "$as_me:$LINENO: checking for suffix of executables" >&5 $as_echo_n "checking for suffix of executables... " >&6; } -if { { ac_try="$ac_link" +if { (ac_try="$ac_link" case "(($ac_try" in *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; *) ac_try_echo=$ac_try;; esac -eval ac_try_echo="\"\$as_me:${as_lineno-$LINENO}: $ac_try_echo\"" -$as_echo "$ac_try_echo"; } >&5 +eval ac_try_echo="\"\$as_me:$LINENO: $ac_try_echo\"" +$as_echo "$ac_try_echo") >&5 (eval "$ac_link") 2>&5 ac_status=$? - $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 - test $ac_status = 0; }; then : + $as_echo "$as_me:$LINENO: \$? = $ac_status" >&5 + (exit $ac_status); }; then # If both `conftest.exe' and `conftest' are `present' (well, observable) # catch `conftest.exe'. For instance with Cygwin, `ls conftest' will # work properly (i.e., refer to `conftest.exe'), while it won't with @@ -2750,83 +2510,32 @@ for ac_file in conftest.exe conftest conftest.*; do esac done else - { { $as_echo "$as_me:${as_lineno-$LINENO}: error: in \`$ac_pwd':" >&5 + { { $as_echo "$as_me:$LINENO: error: in \`$ac_pwd':" >&5 $as_echo "$as_me: error: in \`$ac_pwd':" >&2;} -as_fn_error "cannot compute suffix of executables: cannot compile and link -See \`config.log' for more details." "$LINENO" 5; } +{ { $as_echo "$as_me:$LINENO: error: cannot compute suffix of executables: cannot compile and link +See \`config.log' for more details." >&5 +$as_echo "$as_me: error: cannot compute suffix of executables: cannot compile and link +See \`config.log' for more details." 
>&2;} + { (exit 1); exit 1; }; }; } fi -rm -f conftest conftest$ac_cv_exeext -{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_exeext" >&5 + +rm -f conftest$ac_cv_exeext +{ $as_echo "$as_me:$LINENO: result: $ac_cv_exeext" >&5 $as_echo "$ac_cv_exeext" >&6; } rm -f conftest.$ac_ext EXEEXT=$ac_cv_exeext ac_exeext=$EXEEXT -cat confdefs.h - <<_ACEOF >conftest.$ac_ext -/* end confdefs.h. */ -#include -int -main () -{ -FILE *f = fopen ("conftest.out", "w"); - return ferror (f) || fclose (f) != 0; - - ; - return 0; -} -_ACEOF -ac_clean_files="$ac_clean_files conftest.out" -# Check that the compiler produces executables we can run. If not, either -# the compiler is broken, or we cross compile. -{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether we are cross compiling" >&5 -$as_echo_n "checking whether we are cross compiling... " >&6; } -if test "$cross_compiling" != yes; then - { { ac_try="$ac_link" -case "(($ac_try" in - *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; - *) ac_try_echo=$ac_try;; -esac -eval ac_try_echo="\"\$as_me:${as_lineno-$LINENO}: $ac_try_echo\"" -$as_echo "$ac_try_echo"; } >&5 - (eval "$ac_link") 2>&5 - ac_status=$? - $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 - test $ac_status = 0; } - if { ac_try='./conftest$ac_cv_exeext' - { { case "(($ac_try" in - *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; - *) ac_try_echo=$ac_try;; -esac -eval ac_try_echo="\"\$as_me:${as_lineno-$LINENO}: $ac_try_echo\"" -$as_echo "$ac_try_echo"; } >&5 - (eval "$ac_try") 2>&5 - ac_status=$? - $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 - test $ac_status = 0; }; }; then - cross_compiling=no - else - if test "$cross_compiling" = maybe; then - cross_compiling=yes - else - { { $as_echo "$as_me:${as_lineno-$LINENO}: error: in \`$ac_pwd':" >&5 -$as_echo "$as_me: error: in \`$ac_pwd':" >&2;} -as_fn_error "cannot run C compiled programs. -If you meant to cross compile, use \`--host'. -See \`config.log' for more details." "$LINENO" 5; } - fi - fi -fi -{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $cross_compiling" >&5 -$as_echo "$cross_compiling" >&6; } - -rm -f conftest.$ac_ext conftest$ac_cv_exeext conftest.out -ac_clean_files=$ac_clean_files_save -{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for suffix of object files" >&5 +{ $as_echo "$as_me:$LINENO: checking for suffix of object files" >&5 $as_echo_n "checking for suffix of object files... " >&6; } -if test "${ac_cv_objext+set}" = set; then : +if test "${ac_cv_objext+set}" = set; then $as_echo_n "(cached) " >&6 else - cat confdefs.h - <<_ACEOF >conftest.$ac_ext + cat >conftest.$ac_ext <<_ACEOF +/* confdefs.h. */ +_ACEOF +cat confdefs.h >>conftest.$ac_ext +cat >>conftest.$ac_ext <<_ACEOF /* end confdefs.h. */ int @@ -2838,17 +2547,17 @@ main () } _ACEOF rm -f conftest.o conftest.obj -if { { ac_try="$ac_compile" +if { (ac_try="$ac_compile" case "(($ac_try" in *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; *) ac_try_echo=$ac_try;; esac -eval ac_try_echo="\"\$as_me:${as_lineno-$LINENO}: $ac_try_echo\"" -$as_echo "$ac_try_echo"; } >&5 +eval ac_try_echo="\"\$as_me:$LINENO: $ac_try_echo\"" +$as_echo "$ac_try_echo") >&5 (eval "$ac_compile") 2>&5 ac_status=$? - $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 - test $ac_status = 0; }; then : + $as_echo "$as_me:$LINENO: \$? 
= $ac_status" >&5 + (exit $ac_status); }; then for ac_file in conftest.o conftest.obj conftest.*; do test -f "$ac_file" || continue; case $ac_file in @@ -2861,23 +2570,31 @@ else $as_echo "$as_me: failed program was:" >&5 sed 's/^/| /' conftest.$ac_ext >&5 -{ { $as_echo "$as_me:${as_lineno-$LINENO}: error: in \`$ac_pwd':" >&5 +{ { $as_echo "$as_me:$LINENO: error: in \`$ac_pwd':" >&5 $as_echo "$as_me: error: in \`$ac_pwd':" >&2;} -as_fn_error "cannot compute suffix of object files: cannot compile -See \`config.log' for more details." "$LINENO" 5; } +{ { $as_echo "$as_me:$LINENO: error: cannot compute suffix of object files: cannot compile +See \`config.log' for more details." >&5 +$as_echo "$as_me: error: cannot compute suffix of object files: cannot compile +See \`config.log' for more details." >&2;} + { (exit 1); exit 1; }; }; } fi + rm -f conftest.$ac_cv_objext conftest.$ac_ext fi -{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_objext" >&5 +{ $as_echo "$as_me:$LINENO: result: $ac_cv_objext" >&5 $as_echo "$ac_cv_objext" >&6; } OBJEXT=$ac_cv_objext ac_objext=$OBJEXT -{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether we are using the GNU C compiler" >&5 +{ $as_echo "$as_me:$LINENO: checking whether we are using the GNU C compiler" >&5 $as_echo_n "checking whether we are using the GNU C compiler... " >&6; } -if test "${ac_cv_c_compiler_gnu+set}" = set; then : +if test "${ac_cv_c_compiler_gnu+set}" = set; then $as_echo_n "(cached) " >&6 else - cat confdefs.h - <<_ACEOF >conftest.$ac_ext + cat >conftest.$ac_ext <<_ACEOF +/* confdefs.h. */ +_ACEOF +cat confdefs.h >>conftest.$ac_ext +cat >>conftest.$ac_ext <<_ACEOF /* end confdefs.h. */ int @@ -2891,16 +2608,37 @@ main () return 0; } _ACEOF -if ac_fn_c_try_compile "$LINENO"; then : +rm -f conftest.$ac_objext +if { (ac_try="$ac_compile" +case "(($ac_try" in + *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; + *) ac_try_echo=$ac_try;; +esac +eval ac_try_echo="\"\$as_me:$LINENO: $ac_try_echo\"" +$as_echo "$ac_try_echo") >&5 + (eval "$ac_compile") 2>conftest.er1 + ac_status=$? + grep -v '^ *+' conftest.er1 >conftest.err + rm -f conftest.er1 + cat conftest.err >&5 + $as_echo "$as_me:$LINENO: \$? = $ac_status" >&5 + (exit $ac_status); } && { + test -z "$ac_c_werror_flag" || + test ! -s conftest.err + } && test -s conftest.$ac_objext; then ac_compiler_gnu=yes else - ac_compiler_gnu=no + $as_echo "$as_me: failed program was:" >&5 +sed 's/^/| /' conftest.$ac_ext >&5 + + ac_compiler_gnu=no fi + rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext ac_cv_c_compiler_gnu=$ac_compiler_gnu fi -{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_c_compiler_gnu" >&5 +{ $as_echo "$as_me:$LINENO: result: $ac_cv_c_compiler_gnu" >&5 $as_echo "$ac_cv_c_compiler_gnu" >&6; } if test $ac_compiler_gnu = yes; then GCC=yes @@ -2909,16 +2647,20 @@ else fi ac_test_CFLAGS=${CFLAGS+set} ac_save_CFLAGS=$CFLAGS -{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether $CC accepts -g" >&5 +{ $as_echo "$as_me:$LINENO: checking whether $CC accepts -g" >&5 $as_echo_n "checking whether $CC accepts -g... " >&6; } -if test "${ac_cv_prog_cc_g+set}" = set; then : +if test "${ac_cv_prog_cc_g+set}" = set; then $as_echo_n "(cached) " >&6 else ac_save_c_werror_flag=$ac_c_werror_flag ac_c_werror_flag=yes ac_cv_prog_cc_g=no CFLAGS="-g" - cat confdefs.h - <<_ACEOF >conftest.$ac_ext + cat >conftest.$ac_ext <<_ACEOF +/* confdefs.h. */ +_ACEOF +cat confdefs.h >>conftest.$ac_ext +cat >>conftest.$ac_ext <<_ACEOF /* end confdefs.h. 
*/ int @@ -2929,11 +2671,35 @@ main () return 0; } _ACEOF -if ac_fn_c_try_compile "$LINENO"; then : +rm -f conftest.$ac_objext +if { (ac_try="$ac_compile" +case "(($ac_try" in + *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; + *) ac_try_echo=$ac_try;; +esac +eval ac_try_echo="\"\$as_me:$LINENO: $ac_try_echo\"" +$as_echo "$ac_try_echo") >&5 + (eval "$ac_compile") 2>conftest.er1 + ac_status=$? + grep -v '^ *+' conftest.er1 >conftest.err + rm -f conftest.er1 + cat conftest.err >&5 + $as_echo "$as_me:$LINENO: \$? = $ac_status" >&5 + (exit $ac_status); } && { + test -z "$ac_c_werror_flag" || + test ! -s conftest.err + } && test -s conftest.$ac_objext; then ac_cv_prog_cc_g=yes else - CFLAGS="" - cat confdefs.h - <<_ACEOF >conftest.$ac_ext + $as_echo "$as_me: failed program was:" >&5 +sed 's/^/| /' conftest.$ac_ext >&5 + + CFLAGS="" + cat >conftest.$ac_ext <<_ACEOF +/* confdefs.h. */ +_ACEOF +cat confdefs.h >>conftest.$ac_ext +cat >>conftest.$ac_ext <<_ACEOF /* end confdefs.h. */ int @@ -2944,12 +2710,36 @@ main () return 0; } _ACEOF -if ac_fn_c_try_compile "$LINENO"; then : - +rm -f conftest.$ac_objext +if { (ac_try="$ac_compile" +case "(($ac_try" in + *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; + *) ac_try_echo=$ac_try;; +esac +eval ac_try_echo="\"\$as_me:$LINENO: $ac_try_echo\"" +$as_echo "$ac_try_echo") >&5 + (eval "$ac_compile") 2>conftest.er1 + ac_status=$? + grep -v '^ *+' conftest.er1 >conftest.err + rm -f conftest.er1 + cat conftest.err >&5 + $as_echo "$as_me:$LINENO: \$? = $ac_status" >&5 + (exit $ac_status); } && { + test -z "$ac_c_werror_flag" || + test ! -s conftest.err + } && test -s conftest.$ac_objext; then + : else - ac_c_werror_flag=$ac_save_c_werror_flag + $as_echo "$as_me: failed program was:" >&5 +sed 's/^/| /' conftest.$ac_ext >&5 + + ac_c_werror_flag=$ac_save_c_werror_flag CFLAGS="-g" - cat confdefs.h - <<_ACEOF >conftest.$ac_ext + cat >conftest.$ac_ext <<_ACEOF +/* confdefs.h. */ +_ACEOF +cat confdefs.h >>conftest.$ac_ext +cat >>conftest.$ac_ext <<_ACEOF /* end confdefs.h. */ int @@ -2960,17 +2750,42 @@ main () return 0; } _ACEOF -if ac_fn_c_try_compile "$LINENO"; then : +rm -f conftest.$ac_objext +if { (ac_try="$ac_compile" +case "(($ac_try" in + *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; + *) ac_try_echo=$ac_try;; +esac +eval ac_try_echo="\"\$as_me:$LINENO: $ac_try_echo\"" +$as_echo "$ac_try_echo") >&5 + (eval "$ac_compile") 2>conftest.er1 + ac_status=$? + grep -v '^ *+' conftest.er1 >conftest.err + rm -f conftest.er1 + cat conftest.err >&5 + $as_echo "$as_me:$LINENO: \$? = $ac_status" >&5 + (exit $ac_status); } && { + test -z "$ac_c_werror_flag" || + test ! -s conftest.err + } && test -s conftest.$ac_objext; then ac_cv_prog_cc_g=yes +else + $as_echo "$as_me: failed program was:" >&5 +sed 's/^/| /' conftest.$ac_ext >&5 + + fi + rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext fi + rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext fi + rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext ac_c_werror_flag=$ac_save_c_werror_flag fi -{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_prog_cc_g" >&5 +{ $as_echo "$as_me:$LINENO: result: $ac_cv_prog_cc_g" >&5 $as_echo "$ac_cv_prog_cc_g" >&6; } if test "$ac_test_CFLAGS" = set; then CFLAGS=$ac_save_CFLAGS @@ -2987,14 +2802,18 @@ else CFLAGS= fi fi -{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $CC option to accept ISO C89" >&5 +{ $as_echo "$as_me:$LINENO: checking for $CC option to accept ISO C89" >&5 $as_echo_n "checking for $CC option to accept ISO C89... 
" >&6; } -if test "${ac_cv_prog_cc_c89+set}" = set; then : +if test "${ac_cv_prog_cc_c89+set}" = set; then $as_echo_n "(cached) " >&6 else ac_cv_prog_cc_c89=no ac_save_CC=$CC -cat confdefs.h - <<_ACEOF >conftest.$ac_ext +cat >conftest.$ac_ext <<_ACEOF +/* confdefs.h. */ +_ACEOF +cat confdefs.h >>conftest.$ac_ext +cat >>conftest.$ac_ext <<_ACEOF /* end confdefs.h. */ #include #include @@ -3051,9 +2870,32 @@ for ac_arg in '' -qlanglvl=extc89 -qlanglvl=ansi -std \ -Ae "-Aa -D_HPUX_SOURCE" "-Xc -D__EXTENSIONS__" do CC="$ac_save_CC $ac_arg" - if ac_fn_c_try_compile "$LINENO"; then : + rm -f conftest.$ac_objext +if { (ac_try="$ac_compile" +case "(($ac_try" in + *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; + *) ac_try_echo=$ac_try;; +esac +eval ac_try_echo="\"\$as_me:$LINENO: $ac_try_echo\"" +$as_echo "$ac_try_echo") >&5 + (eval "$ac_compile") 2>conftest.er1 + ac_status=$? + grep -v '^ *+' conftest.er1 >conftest.err + rm -f conftest.er1 + cat conftest.err >&5 + $as_echo "$as_me:$LINENO: \$? = $ac_status" >&5 + (exit $ac_status); } && { + test -z "$ac_c_werror_flag" || + test ! -s conftest.err + } && test -s conftest.$ac_objext; then ac_cv_prog_cc_c89=$ac_arg +else + $as_echo "$as_me: failed program was:" >&5 +sed 's/^/| /' conftest.$ac_ext >&5 + + fi + rm -f core conftest.err conftest.$ac_objext test "x$ac_cv_prog_cc_c89" != "xno" && break done @@ -3064,19 +2906,17 @@ fi # AC_CACHE_VAL case "x$ac_cv_prog_cc_c89" in x) - { $as_echo "$as_me:${as_lineno-$LINENO}: result: none needed" >&5 + { $as_echo "$as_me:$LINENO: result: none needed" >&5 $as_echo "none needed" >&6; } ;; xno) - { $as_echo "$as_me:${as_lineno-$LINENO}: result: unsupported" >&5 + { $as_echo "$as_me:$LINENO: result: unsupported" >&5 $as_echo "unsupported" >&6; } ;; *) CC="$CC $ac_cv_prog_cc_c89" - { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_prog_cc_c89" >&5 + { $as_echo "$as_me:$LINENO: result: $ac_cv_prog_cc_c89" >&5 $as_echo "$ac_cv_prog_cc_c89" >&6; } ;; esac -if test "x$ac_cv_prog_cc_c89" != xno; then : -fi ac_ext=c ac_cpp='$CPP $CPPFLAGS' @@ -3098,10 +2938,10 @@ ac_compiler_gnu=$ac_cv_c_compiler_gnu # OS/2's system install, which has a completely different semantic # ./install, which can be erroneously created by make from ./install.sh. # Reject install programs that cannot install multiple files. -{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for a BSD-compatible install" >&5 +{ $as_echo "$as_me:$LINENO: checking for a BSD-compatible install" >&5 $as_echo_n "checking for a BSD-compatible install... " >&6; } if test -z "$INSTALL"; then -if test "${ac_cv_path_install+set}" = set; then : +if test "${ac_cv_path_install+set}" = set; then $as_echo_n "(cached) " >&6 else as_save_IFS=$IFS; IFS=$PATH_SEPARATOR @@ -3109,11 +2949,11 @@ for as_dir in $PATH do IFS=$as_save_IFS test -z "$as_dir" && as_dir=. - # Account for people who put trailing slashes in PATH elements. -case $as_dir/ in #(( - ./ | .// | /[cC]/* | \ + # Account for people who put trailing slashes in PATH elements. +case $as_dir/ in + ./ | .// | /cC/* | \ /etc/* | /usr/sbin/* | /usr/etc/* | /sbin/* | /usr/afsws/bin/* | \ - ?:[\\/]os2[\\/]install[\\/]* | ?:[\\/]OS2[\\/]INSTALL[\\/]* | \ + ?:\\/os2\\/install\\/* | ?:\\/OS2\\/INSTALL\\/* | \ /usr/ucb/* ) ;; *) # OSF1 and SCO ODT 3.0 have their own names for install. 
@@ -3150,7 +2990,7 @@ case $as_dir/ in #(( ;; esac - done +done IFS=$as_save_IFS rm -rf conftest.one conftest.two conftest.dir @@ -3166,7 +3006,7 @@ fi INSTALL=$ac_install_sh fi fi -{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $INSTALL" >&5 +{ $as_echo "$as_me:$LINENO: result: $INSTALL" >&5 $as_echo "$INSTALL" >&6; } # Use test -z because SunOS4 sh mishandles braces in ${var-val}. @@ -3179,9 +3019,9 @@ test -z "$INSTALL_DATA" && INSTALL_DATA='${INSTALL} -m 644' # Extract the first word of "openssl", so it can be a program name with args. set dummy openssl; ac_word=$2 -{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 +{ $as_echo "$as_me:$LINENO: checking for $ac_word" >&5 $as_echo_n "checking for $ac_word... " >&6; } -if test "${ac_cv_path_OPENSSL+set}" = set; then : +if test "${ac_cv_path_OPENSSL+set}" = set; then $as_echo_n "(cached) " >&6 else case $OPENSSL in @@ -3194,14 +3034,14 @@ for as_dir in $PATH do IFS=$as_save_IFS test -z "$as_dir" && as_dir=. - for ac_exec_ext in '' $ac_executable_extensions; do + for ac_exec_ext in '' $ac_executable_extensions; do if { test -f "$as_dir/$ac_word$ac_exec_ext" && $as_test_x "$as_dir/$ac_word$ac_exec_ext"; }; then ac_cv_path_OPENSSL="$as_dir/$ac_word$ac_exec_ext" - $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 + $as_echo "$as_me:$LINENO: found $as_dir/$ac_word$ac_exec_ext" >&5 break 2 fi done - done +done IFS=$as_save_IFS test -z "$ac_cv_path_OPENSSL" && ac_cv_path_OPENSSL="openssl" @@ -3210,10 +3050,10 @@ esac fi OPENSSL=$ac_cv_path_OPENSSL if test -n "$OPENSSL"; then - { $as_echo "$as_me:${as_lineno-$LINENO}: result: $OPENSSL" >&5 + { $as_echo "$as_me:$LINENO: result: $OPENSSL" >&5 $as_echo "$OPENSSL" >&6; } else - { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 + { $as_echo "$as_me:$LINENO: result: no" >&5 $as_echo "no" >&6; } fi @@ -3237,7 +3077,7 @@ esac # IPv6 support ################################################# # Check whether --enable-ipv6 was given. -if test "${enable_ipv6+set}" = set; then : +if test "${enable_ipv6+set}" = set; then enableval=$enable_ipv6; if test $enableval != "no" ; then CPPFLAGS="-DINET6 $CPPFLAGS" @@ -3251,7 +3091,7 @@ fi # SCTP support ################################################# # Check whether --enable-sctp was given. -if test "${enable_sctp+set}" = set; then : +if test "${enable_sctp+set}" = set; then enableval=$enable_sctp; if test $enableval != "no" ; then CPPFLAGS="-DSCTPSUPPORT $CPPFLAGS" @@ -3262,39 +3102,54 @@ fi +# +# JSONLINES support +################################################# +# Check whether --enable-jsonlines was given. +if test "${enable_jsonlines+set}" = set; then + enableval=$enable_jsonlines; + if test $enableval != "no" ; then + CPPFLAGS="-DJSONLINESSUPPORT $CPPFLAGS" + IPFIX_JSONLINES_SOURCES="ipfix_col_jsonlines.o json_util.o" + fi + +fi + + + # # enable ssl support ################################################# -{ $as_echo "$as_me:${as_lineno-$LINENO}: checking enable tls/dtls support" >&5 +{ $as_echo "$as_me:$LINENO: checking enable tls/dtls support" >&5 $as_echo_n "checking enable tls/dtls support... " >&6; } # Check whether --with-ssl was given. 
-if test "${with_ssl+set}" = set; then : +if test "${with_ssl+set}" = set; then withval=$with_ssl; if test "$withval" != "no"; then - { $as_echo "$as_me:${as_lineno-$LINENO}: result: yes" >&5 + { $as_echo "$as_me:$LINENO: result: yes" >&5 $as_echo "yes" >&6; } if test "$withval" != "yes"; then - { $as_echo "$as_me:${as_lineno-$LINENO}: checking for libssl in $withval/lib" >&5 + { $as_echo "$as_me:$LINENO: checking for libssl in $withval/lib" >&5 $as_echo_n "checking for libssl in $withval/lib... " >&6; } if test -f $withval/lib/libssl.a -o -f $withval/lib/libssl.so ; then - { $as_echo "$as_me:${as_lineno-$LINENO}: result: yes" >&5 + { $as_echo "$as_me:$LINENO: result: yes" >&5 $as_echo "yes" >&6; } LDFLAGS="$LDFLAGS -L$withval/lib" CPPFLAGS="$CPPFLAGS -I$withval/include" else - { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 + { $as_echo "$as_me:$LINENO: result: no" >&5 $as_echo "no" >&6; } - { $as_echo "$as_me:${as_lineno-$LINENO}: checking for libssl in $withval" >&5 + { $as_echo "$as_me:$LINENO: checking for libssl in $withval" >&5 $as_echo_n "checking for libssl in $withval... " >&6; } if test -f $withval/libssl.a -o -f $withval/libssl.so ; then - { $as_echo "$as_me:${as_lineno-$LINENO}: result: yes" >&5 + { $as_echo "$as_me:$LINENO: result: yes" >&5 $as_echo "yes" >&6; } LDFLAGS="$LDFLAGS -L$withval" CPPFLAGS="$CPPFLAGS -I$withval" else - { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 + { $as_echo "$as_me:$LINENO: result: no" >&5 $as_echo "no" >&6; } fi fi @@ -3302,14 +3157,18 @@ $as_echo "no" >&6; } SAVELIBS=$LIBS LIBS= - { $as_echo "$as_me:${as_lineno-$LINENO}: checking for SSL_new in -lssl" >&5 + { $as_echo "$as_me:$LINENO: checking for SSL_new in -lssl" >&5 $as_echo_n "checking for SSL_new in -lssl... " >&6; } -if test "${ac_cv_lib_ssl_SSL_new+set}" = set; then : +if test "${ac_cv_lib_ssl_SSL_new+set}" = set; then $as_echo_n "(cached) " >&6 else ac_check_lib_save_LIBS=$LIBS LIBS="-lssl $LIBS" -cat confdefs.h - <<_ACEOF >conftest.$ac_ext +cat >conftest.$ac_ext <<_ACEOF +/* confdefs.h. */ +_ACEOF +cat confdefs.h >>conftest.$ac_ext +cat >>conftest.$ac_ext <<_ACEOF /* end confdefs.h. */ /* Override any GCC internal prototype to avoid an error. @@ -3327,18 +3186,43 @@ return SSL_new (); return 0; } _ACEOF -if ac_fn_c_try_link "$LINENO"; then : +rm -f conftest.$ac_objext conftest$ac_exeext +if { (ac_try="$ac_link" +case "(($ac_try" in + *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; + *) ac_try_echo=$ac_try;; +esac +eval ac_try_echo="\"\$as_me:$LINENO: $ac_try_echo\"" +$as_echo "$ac_try_echo") >&5 + (eval "$ac_link") 2>conftest.er1 + ac_status=$? + grep -v '^ *+' conftest.er1 >conftest.err + rm -f conftest.er1 + cat conftest.err >&5 + $as_echo "$as_me:$LINENO: \$? = $ac_status" >&5 + (exit $ac_status); } && { + test -z "$ac_c_werror_flag" || + test ! 
-s conftest.err + } && test -s conftest$ac_exeext && { + test "$cross_compiling" = yes || + $as_test_x conftest$ac_exeext + }; then ac_cv_lib_ssl_SSL_new=yes else - ac_cv_lib_ssl_SSL_new=no + $as_echo "$as_me: failed program was:" >&5 +sed 's/^/| /' conftest.$ac_ext >&5 + + ac_cv_lib_ssl_SSL_new=no fi -rm -f core conftest.err conftest.$ac_objext \ - conftest$ac_exeext conftest.$ac_ext + +rm -rf conftest.dSYM +rm -f core conftest.err conftest.$ac_objext conftest_ipa8_conftest.oo \ + conftest$ac_exeext conftest.$ac_ext LIBS=$ac_check_lib_save_LIBS fi -{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_lib_ssl_SSL_new" >&5 +{ $as_echo "$as_me:$LINENO: result: $ac_cv_lib_ssl_SSL_new" >&5 $as_echo "$ac_cv_lib_ssl_SSL_new" >&6; } -if test "x$ac_cv_lib_ssl_SSL_new" = x""yes; then : +if test "x$ac_cv_lib_ssl_SSL_new" = x""yes; then SSLLIBS="-lssl -lcrypto" CPPFLAGS="$CPPFLAGS -DSSLSUPPORT" @@ -3346,20 +3230,22 @@ if test "x$ac_cv_lib_ssl_SSL_new" = x""yes; then : else - as_fn_error "SSL support was requested, but is not available" "$LINENO" 5 + { { $as_echo "$as_me:$LINENO: error: SSL support was requested, but is not available" >&5 +$as_echo "$as_me: error: SSL support was requested, but is not available" >&2;} + { (exit 1); exit 1; }; } fi SSLLIBS="$LIBS $SSLLIBS" LIBS=$SAVELIBS else - { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 + { $as_echo "$as_me:$LINENO: result: no" >&5 $as_echo "no" >&6; } fi else - { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 + { $as_echo "$as_me:$LINENO: result: no" >&5 $as_echo "no" >&6; } fi @@ -3372,44 +3258,48 @@ fi ################################################# # Check whether --with-mysql was given. -if test "${with_mysql+set}" = set; then : +if test "${with_mysql+set}" = set; then withval=$with_mysql; if test "$withval" != "no"; then if test "$withval" != "yes"; then - { $as_echo "$as_me:${as_lineno-$LINENO}: checking for libmysqlclient in $withval/lib" >&5 + { $as_echo "$as_me:$LINENO: checking for libmysqlclient in $withval/lib" >&5 $as_echo_n "checking for libmysqlclient in $withval/lib... " >&6; } if test -f $withval/libmysqlclient.a -o -f $withval/libmysqlclient.so ; then LDFLAGS="$LDFLAGS -L$withval " - { $as_echo "$as_me:${as_lineno-$LINENO}: result: yes" >&5 + { $as_echo "$as_me:$LINENO: result: yes" >&5 $as_echo "yes" >&6; } else if test -f $withval/lib/libmysqlclient.a -o -f $withval/lib/libmysqlclient.so ; then LDFLAGS="$LDFLAGS -L$withval/lib " CPPFLAGS="$CPPFLAGS -I$withval/include" - { $as_echo "$as_me:${as_lineno-$LINENO}: result: yes" >&5 + { $as_echo "$as_me:$LINENO: result: yes" >&5 $as_echo "yes" >&6; } else if test -f $withval/lib/mysql/libmysqlclient.a -o -f $withval/lib/mysql/libmysqlclient.so ; then LDFLAGS="$LDFLAGS -L$withval/lib/mysql " CPPFLAGS="$CPPFLAGS -I$withval/include" - { $as_echo "$as_me:${as_lineno-$LINENO}: result: yes" >&5 + { $as_echo "$as_me:$LINENO: result: yes" >&5 $as_echo "yes" >&6; } else - { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 + { $as_echo "$as_me:$LINENO: result: no" >&5 $as_echo "no" >&6; } fi fi fi fi - { $as_echo "$as_me:${as_lineno-$LINENO}: checking for mysql_init in -lmysqlclient" >&5 + { $as_echo "$as_me:$LINENO: checking for mysql_init in -lmysqlclient" >&5 $as_echo_n "checking for mysql_init in -lmysqlclient... 
" >&6; } -if test "${ac_cv_lib_mysqlclient_mysql_init+set}" = set; then : +if test "${ac_cv_lib_mysqlclient_mysql_init+set}" = set; then $as_echo_n "(cached) " >&6 else ac_check_lib_save_LIBS=$LIBS LIBS="-lmysqlclient $LIBS" -cat confdefs.h - <<_ACEOF >conftest.$ac_ext +cat >conftest.$ac_ext <<_ACEOF +/* confdefs.h. */ +_ACEOF +cat confdefs.h >>conftest.$ac_ext +cat >>conftest.$ac_ext <<_ACEOF /* end confdefs.h. */ /* Override any GCC internal prototype to avoid an error. @@ -3427,18 +3317,43 @@ return mysql_init (); return 0; } _ACEOF -if ac_fn_c_try_link "$LINENO"; then : +rm -f conftest.$ac_objext conftest$ac_exeext +if { (ac_try="$ac_link" +case "(($ac_try" in + *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; + *) ac_try_echo=$ac_try;; +esac +eval ac_try_echo="\"\$as_me:$LINENO: $ac_try_echo\"" +$as_echo "$ac_try_echo") >&5 + (eval "$ac_link") 2>conftest.er1 + ac_status=$? + grep -v '^ *+' conftest.er1 >conftest.err + rm -f conftest.er1 + cat conftest.err >&5 + $as_echo "$as_me:$LINENO: \$? = $ac_status" >&5 + (exit $ac_status); } && { + test -z "$ac_c_werror_flag" || + test ! -s conftest.err + } && test -s conftest$ac_exeext && { + test "$cross_compiling" = yes || + $as_test_x conftest$ac_exeext + }; then ac_cv_lib_mysqlclient_mysql_init=yes else - ac_cv_lib_mysqlclient_mysql_init=no + $as_echo "$as_me: failed program was:" >&5 +sed 's/^/| /' conftest.$ac_ext >&5 + + ac_cv_lib_mysqlclient_mysql_init=no fi -rm -f core conftest.err conftest.$ac_objext \ - conftest$ac_exeext conftest.$ac_ext + +rm -rf conftest.dSYM +rm -f core conftest.err conftest.$ac_objext conftest_ipa8_conftest.oo \ + conftest$ac_exeext conftest.$ac_ext LIBS=$ac_check_lib_save_LIBS fi -{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_lib_mysqlclient_mysql_init" >&5 +{ $as_echo "$as_me:$LINENO: result: $ac_cv_lib_mysqlclient_mysql_init" >&5 $as_echo "$ac_cv_lib_mysqlclient_mysql_init" >&6; } -if test "x$ac_cv_lib_mysqlclient_mysql_init" = x""yes; then : +if test "x$ac_cv_lib_mysqlclient_mysql_init" = x""yes; then MYSQLLIBS="-lmysqlclient" IPFIX_DB_OBJ="ipfix_db.o" @@ -3447,7 +3362,7 @@ if test "x$ac_cv_lib_mysqlclient_mysql_init" = x""yes; then : else - { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: cannot find mysql library" >&5 + { $as_echo "$as_me:$LINENO: WARNING: cannot find mysql library" >&5 $as_echo "$as_me: WARNING: cannot find mysql library" >&2;} fi @@ -3475,9 +3390,9 @@ esac # Check whether --with-pcap was given. -if test "${with_pcap+set}" = set; then : +if test "${with_pcap+set}" = set; then withval=$with_pcap; - { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: pcap path given" >&5 + { $as_echo "$as_me:$LINENO: WARNING: pcap path given" >&5 $as_echo "$as_me: WARNING: pcap path given" >&2;} #AC_MSG_CHECKING(for lib$PCAP in $withval/lib) #if test -f $withval/lib/lib$PCAP.a -o -f $withval/lib/lib$PCAP.so ; then @@ -3494,15 +3409,20 @@ fi SAVELIBS=$LIBS LIBS= + as_ac_Lib=`$as_echo "ac_cv_lib_$PCAP''_pcap_open_live" | $as_tr_sh` -{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for pcap_open_live in -l$PCAP" >&5 +{ $as_echo "$as_me:$LINENO: checking for pcap_open_live in -l$PCAP" >&5 $as_echo_n "checking for pcap_open_live in -l$PCAP... " >&6; } -if { as_var=$as_ac_Lib; eval "test \"\${$as_var+set}\" = set"; }; then : +if { as_var=$as_ac_Lib; eval "test \"\${$as_var+set}\" = set"; }; then $as_echo_n "(cached) " >&6 else ac_check_lib_save_LIBS=$LIBS LIBS="-l$PCAP $LIBS" -cat confdefs.h - <<_ACEOF >conftest.$ac_ext +cat >conftest.$ac_ext <<_ACEOF +/* confdefs.h. 
*/ +_ACEOF +cat confdefs.h >>conftest.$ac_ext +cat >>conftest.$ac_ext <<_ACEOF /* end confdefs.h. */ /* Override any GCC internal prototype to avoid an error. @@ -3520,20 +3440,47 @@ return pcap_open_live (); return 0; } _ACEOF -if ac_fn_c_try_link "$LINENO"; then : +rm -f conftest.$ac_objext conftest$ac_exeext +if { (ac_try="$ac_link" +case "(($ac_try" in + *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; + *) ac_try_echo=$ac_try;; +esac +eval ac_try_echo="\"\$as_me:$LINENO: $ac_try_echo\"" +$as_echo "$ac_try_echo") >&5 + (eval "$ac_link") 2>conftest.er1 + ac_status=$? + grep -v '^ *+' conftest.er1 >conftest.err + rm -f conftest.er1 + cat conftest.err >&5 + $as_echo "$as_me:$LINENO: \$? = $ac_status" >&5 + (exit $ac_status); } && { + test -z "$ac_c_werror_flag" || + test ! -s conftest.err + } && test -s conftest$ac_exeext && { + test "$cross_compiling" = yes || + $as_test_x conftest$ac_exeext + }; then eval "$as_ac_Lib=yes" else - eval "$as_ac_Lib=no" + $as_echo "$as_me: failed program was:" >&5 +sed 's/^/| /' conftest.$ac_ext >&5 + + eval "$as_ac_Lib=no" fi -rm -f core conftest.err conftest.$ac_objext \ - conftest$ac_exeext conftest.$ac_ext + +rm -rf conftest.dSYM +rm -f core conftest.err conftest.$ac_objext conftest_ipa8_conftest.oo \ + conftest$ac_exeext conftest.$ac_ext LIBS=$ac_check_lib_save_LIBS fi -eval ac_res=\$$as_ac_Lib - { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_res" >&5 +ac_res=`eval 'as_val=${'$as_ac_Lib'} + $as_echo "$as_val"'` + { $as_echo "$as_me:$LINENO: result: $ac_res" >&5 $as_echo "$ac_res" >&6; } -eval as_val=\$$as_ac_Lib - if test "x$as_val" = x""yes; then : +as_val=`eval 'as_val=${'$as_ac_Lib'} + $as_echo "$as_val"'` + if test "x$as_val" = x""yes; then cat >>confdefs.h <<_ACEOF #define `$as_echo "HAVE_LIB$PCAP" | $as_tr_cpp` 1 _ACEOF @@ -3541,87 +3488,224 @@ _ACEOF LIBS="-l$PCAP $LIBS" else - { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: cannot find libpcap" >&5 + { $as_echo "$as_me:$LINENO: WARNING: cannot find libpcap" >&5 $as_echo "$as_me: WARNING: cannot find libpcap" >&2;} fi -for ac_func in pcap_breakloop pcap_freecode -do : - as_ac_var=`$as_echo "ac_cv_func_$ac_func" | $as_tr_sh` -ac_fn_c_check_func "$LINENO" "$ac_func" "$as_ac_var" -eval as_val=\$$as_ac_var - if test "x$as_val" = x""yes; then : - cat >>confdefs.h <<_ACEOF -#define `$as_echo "HAVE_$ac_func" | $as_tr_cpp` 1 -_ACEOF -fi -done -ac_ext=c -ac_cpp='$CPP $CPPFLAGS' -ac_compile='$CC -c $CFLAGS $CPPFLAGS conftest.$ac_ext >&5' -ac_link='$CC -o conftest$ac_exeext $CFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5' -ac_compiler_gnu=$ac_cv_c_compiler_gnu -{ $as_echo "$as_me:${as_lineno-$LINENO}: checking how to run the C preprocessor" >&5 -$as_echo_n "checking how to run the C preprocessor... " >&6; } -# On Suns, sometimes $CPP names a directory. -if test -n "$CPP" && test -d "$CPP"; then - CPP= -fi -if test -z "$CPP"; then - if test "${ac_cv_prog_CPP+set}" = set; then : +for ac_func in pcap_breakloop pcap_freecode +do +as_ac_var=`$as_echo "ac_cv_func_$ac_func" | $as_tr_sh` +{ $as_echo "$as_me:$LINENO: checking for $ac_func" >&5 +$as_echo_n "checking for $ac_func... " >&6; } +if { as_var=$as_ac_var; eval "test \"\${$as_var+set}\" = set"; }; then $as_echo_n "(cached) " >&6 else - # Double quotes because CPP needs to be expanded - for CPP in "$CC -E" "$CC -E -traditional-cpp" "/lib/cpp" - do - ac_preproc_ok=false -for ac_c_preproc_warn_flag in '' yes -do - # Use a header file that comes with gcc, so configuring glibc - # with a fresh cross-compiler works. 
- # Prefer to if __STDC__ is defined, since - # exists even on freestanding compilers. - # On the NeXT, cc -E runs the code through the compiler's parser, - # not just through cpp. "Syntax error" is here to catch this case. - cat confdefs.h - <<_ACEOF >conftest.$ac_ext + cat >conftest.$ac_ext <<_ACEOF +/* confdefs.h. */ +_ACEOF +cat confdefs.h >>conftest.$ac_ext +cat >>conftest.$ac_ext <<_ACEOF /* end confdefs.h. */ +/* Define $ac_func to an innocuous variant, in case declares $ac_func. + For example, HP-UX 11i declares gettimeofday. */ +#define $ac_func innocuous_$ac_func + +/* System header to define __stub macros and hopefully few prototypes, + which can conflict with char $ac_func (); below. + Prefer to if __STDC__ is defined, since + exists even on freestanding compilers. */ + #ifdef __STDC__ # include #else # include #endif - Syntax error -_ACEOF -if ac_fn_c_try_cpp "$LINENO"; then : -else - # Broken: fails on valid input. -continue -fi -rm -f conftest.err conftest.$ac_ext +#undef $ac_func - # OK, works on sane cases. Now check whether nonexistent headers - # can be detected and how. - cat confdefs.h - <<_ACEOF >conftest.$ac_ext -/* end confdefs.h. */ -#include -_ACEOF -if ac_fn_c_try_cpp "$LINENO"; then : - # Broken: success on invalid input. -continue -else - # Passes both tests. -ac_preproc_ok=: -break +/* Override any GCC internal prototype to avoid an error. + Use char because int might match the return type of a GCC + builtin and then its argument prototype would still apply. */ +#ifdef __cplusplus +extern "C" +#endif +char $ac_func (); +/* The GNU C library defines this for functions which it implements + to always fail with ENOSYS. Some functions are actually named + something starting with __ and the normal name is an alias. */ +#if defined __stub_$ac_func || defined __stub___$ac_func +choke me +#endif + +int +main () +{ +return $ac_func (); + ; + return 0; +} +_ACEOF +rm -f conftest.$ac_objext conftest$ac_exeext +if { (ac_try="$ac_link" +case "(($ac_try" in + *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; + *) ac_try_echo=$ac_try;; +esac +eval ac_try_echo="\"\$as_me:$LINENO: $ac_try_echo\"" +$as_echo "$ac_try_echo") >&5 + (eval "$ac_link") 2>conftest.er1 + ac_status=$? + grep -v '^ *+' conftest.er1 >conftest.err + rm -f conftest.er1 + cat conftest.err >&5 + $as_echo "$as_me:$LINENO: \$? = $ac_status" >&5 + (exit $ac_status); } && { + test -z "$ac_c_werror_flag" || + test ! -s conftest.err + } && test -s conftest$ac_exeext && { + test "$cross_compiling" = yes || + $as_test_x conftest$ac_exeext + }; then + eval "$as_ac_var=yes" +else + $as_echo "$as_me: failed program was:" >&5 +sed 's/^/| /' conftest.$ac_ext >&5 + + eval "$as_ac_var=no" +fi + +rm -rf conftest.dSYM +rm -f core conftest.err conftest.$ac_objext conftest_ipa8_conftest.oo \ + conftest$ac_exeext conftest.$ac_ext +fi +ac_res=`eval 'as_val=${'$as_ac_var'} + $as_echo "$as_val"'` + { $as_echo "$as_me:$LINENO: result: $ac_res" >&5 +$as_echo "$ac_res" >&6; } +as_val=`eval 'as_val=${'$as_ac_var'} + $as_echo "$as_val"'` + if test "x$as_val" = x""yes; then + cat >>confdefs.h <<_ACEOF +#define `$as_echo "HAVE_$ac_func" | $as_tr_cpp` 1 +_ACEOF + +fi +done + +ac_ext=c +ac_cpp='$CPP $CPPFLAGS' +ac_compile='$CC -c $CFLAGS $CPPFLAGS conftest.$ac_ext >&5' +ac_link='$CC -o conftest$ac_exeext $CFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5' +ac_compiler_gnu=$ac_cv_c_compiler_gnu +{ $as_echo "$as_me:$LINENO: checking how to run the C preprocessor" >&5 +$as_echo_n "checking how to run the C preprocessor... 
" >&6; } +# On Suns, sometimes $CPP names a directory. +if test -n "$CPP" && test -d "$CPP"; then + CPP= +fi +if test -z "$CPP"; then + if test "${ac_cv_prog_CPP+set}" = set; then + $as_echo_n "(cached) " >&6 +else + # Double quotes because CPP needs to be expanded + for CPP in "$CC -E" "$CC -E -traditional-cpp" "/lib/cpp" + do + ac_preproc_ok=false +for ac_c_preproc_warn_flag in '' yes +do + # Use a header file that comes with gcc, so configuring glibc + # with a fresh cross-compiler works. + # Prefer to if __STDC__ is defined, since + # exists even on freestanding compilers. + # On the NeXT, cc -E runs the code through the compiler's parser, + # not just through cpp. "Syntax error" is here to catch this case. + cat >conftest.$ac_ext <<_ACEOF +/* confdefs.h. */ +_ACEOF +cat confdefs.h >>conftest.$ac_ext +cat >>conftest.$ac_ext <<_ACEOF +/* end confdefs.h. */ +#ifdef __STDC__ +# include +#else +# include +#endif + Syntax error +_ACEOF +if { (ac_try="$ac_cpp conftest.$ac_ext" +case "(($ac_try" in + *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; + *) ac_try_echo=$ac_try;; +esac +eval ac_try_echo="\"\$as_me:$LINENO: $ac_try_echo\"" +$as_echo "$ac_try_echo") >&5 + (eval "$ac_cpp conftest.$ac_ext") 2>conftest.er1 + ac_status=$? + grep -v '^ *+' conftest.er1 >conftest.err + rm -f conftest.er1 + cat conftest.err >&5 + $as_echo "$as_me:$LINENO: \$? = $ac_status" >&5 + (exit $ac_status); } >/dev/null && { + test -z "$ac_c_preproc_warn_flag$ac_c_werror_flag" || + test ! -s conftest.err + }; then + : +else + $as_echo "$as_me: failed program was:" >&5 +sed 's/^/| /' conftest.$ac_ext >&5 + + # Broken: fails on valid input. +continue +fi + +rm -f conftest.err conftest.$ac_ext + + # OK, works on sane cases. Now check whether nonexistent headers + # can be detected and how. + cat >conftest.$ac_ext <<_ACEOF +/* confdefs.h. */ +_ACEOF +cat confdefs.h >>conftest.$ac_ext +cat >>conftest.$ac_ext <<_ACEOF +/* end confdefs.h. */ +#include +_ACEOF +if { (ac_try="$ac_cpp conftest.$ac_ext" +case "(($ac_try" in + *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; + *) ac_try_echo=$ac_try;; +esac +eval ac_try_echo="\"\$as_me:$LINENO: $ac_try_echo\"" +$as_echo "$ac_try_echo") >&5 + (eval "$ac_cpp conftest.$ac_ext") 2>conftest.er1 + ac_status=$? + grep -v '^ *+' conftest.er1 >conftest.err + rm -f conftest.er1 + cat conftest.err >&5 + $as_echo "$as_me:$LINENO: \$? = $ac_status" >&5 + (exit $ac_status); } >/dev/null && { + test -z "$ac_c_preproc_warn_flag$ac_c_werror_flag" || + test ! -s conftest.err + }; then + # Broken: success on invalid input. +continue +else + $as_echo "$as_me: failed program was:" >&5 +sed 's/^/| /' conftest.$ac_ext >&5 + + # Passes both tests. +ac_preproc_ok=: +break fi + rm -f conftest.err conftest.$ac_ext done # Because of `break', _AC_PREPROC_IFELSE's cleaning code was skipped. rm -f conftest.err conftest.$ac_ext -if $ac_preproc_ok; then : +if $ac_preproc_ok; then break fi @@ -3633,7 +3717,7 @@ fi else ac_cv_prog_CPP=$CPP fi -{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $CPP" >&5 +{ $as_echo "$as_me:$LINENO: result: $CPP" >&5 $as_echo "$CPP" >&6; } ac_preproc_ok=false for ac_c_preproc_warn_flag in '' yes @@ -3644,7 +3728,11 @@ do # exists even on freestanding compilers. # On the NeXT, cc -E runs the code through the compiler's parser, # not just through cpp. "Syntax error" is here to catch this case. - cat confdefs.h - <<_ACEOF >conftest.$ac_ext + cat >conftest.$ac_ext <<_ACEOF +/* confdefs.h. */ +_ACEOF +cat confdefs.h >>conftest.$ac_ext +cat >>conftest.$ac_ext <<_ACEOF /* end confdefs.h. 
*/ #ifdef __STDC__ # include @@ -3653,40 +3741,87 @@ do #endif Syntax error _ACEOF -if ac_fn_c_try_cpp "$LINENO"; then : - +if { (ac_try="$ac_cpp conftest.$ac_ext" +case "(($ac_try" in + *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; + *) ac_try_echo=$ac_try;; +esac +eval ac_try_echo="\"\$as_me:$LINENO: $ac_try_echo\"" +$as_echo "$ac_try_echo") >&5 + (eval "$ac_cpp conftest.$ac_ext") 2>conftest.er1 + ac_status=$? + grep -v '^ *+' conftest.er1 >conftest.err + rm -f conftest.er1 + cat conftest.err >&5 + $as_echo "$as_me:$LINENO: \$? = $ac_status" >&5 + (exit $ac_status); } >/dev/null && { + test -z "$ac_c_preproc_warn_flag$ac_c_werror_flag" || + test ! -s conftest.err + }; then + : else + $as_echo "$as_me: failed program was:" >&5 +sed 's/^/| /' conftest.$ac_ext >&5 + # Broken: fails on valid input. continue fi + rm -f conftest.err conftest.$ac_ext # OK, works on sane cases. Now check whether nonexistent headers # can be detected and how. - cat confdefs.h - <<_ACEOF >conftest.$ac_ext + cat >conftest.$ac_ext <<_ACEOF +/* confdefs.h. */ +_ACEOF +cat confdefs.h >>conftest.$ac_ext +cat >>conftest.$ac_ext <<_ACEOF /* end confdefs.h. */ #include _ACEOF -if ac_fn_c_try_cpp "$LINENO"; then : +if { (ac_try="$ac_cpp conftest.$ac_ext" +case "(($ac_try" in + *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; + *) ac_try_echo=$ac_try;; +esac +eval ac_try_echo="\"\$as_me:$LINENO: $ac_try_echo\"" +$as_echo "$ac_try_echo") >&5 + (eval "$ac_cpp conftest.$ac_ext") 2>conftest.er1 + ac_status=$? + grep -v '^ *+' conftest.er1 >conftest.err + rm -f conftest.er1 + cat conftest.err >&5 + $as_echo "$as_me:$LINENO: \$? = $ac_status" >&5 + (exit $ac_status); } >/dev/null && { + test -z "$ac_c_preproc_warn_flag$ac_c_werror_flag" || + test ! -s conftest.err + }; then # Broken: success on invalid input. continue else + $as_echo "$as_me: failed program was:" >&5 +sed 's/^/| /' conftest.$ac_ext >&5 + # Passes both tests. ac_preproc_ok=: break fi + rm -f conftest.err conftest.$ac_ext done # Because of `break', _AC_PREPROC_IFELSE's cleaning code was skipped. rm -f conftest.err conftest.$ac_ext -if $ac_preproc_ok; then : - +if $ac_preproc_ok; then + : else - { { $as_echo "$as_me:${as_lineno-$LINENO}: error: in \`$ac_pwd':" >&5 + { { $as_echo "$as_me:$LINENO: error: in \`$ac_pwd':" >&5 $as_echo "$as_me: error: in \`$ac_pwd':" >&2;} -as_fn_error "C preprocessor \"$CPP\" fails sanity check -See \`config.log' for more details." "$LINENO" 5; } +{ { $as_echo "$as_me:$LINENO: error: C preprocessor \"$CPP\" fails sanity check +See \`config.log' for more details." >&5 +$as_echo "$as_me: error: C preprocessor \"$CPP\" fails sanity check +See \`config.log' for more details." >&2;} + { (exit 1); exit 1; }; }; } fi ac_ext=c @@ -3696,9 +3831,9 @@ ac_link='$CC -o conftest$ac_exeext $CFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $ ac_compiler_gnu=$ac_cv_c_compiler_gnu -{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for grep that handles long lines and -e" >&5 +{ $as_echo "$as_me:$LINENO: checking for grep that handles long lines and -e" >&5 $as_echo_n "checking for grep that handles long lines and -e... " >&6; } -if test "${ac_cv_path_GREP+set}" = set; then : +if test "${ac_cv_path_GREP+set}" = set; then $as_echo_n "(cached) " >&6 else if test -z "$GREP"; then @@ -3709,7 +3844,7 @@ for as_dir in $PATH$PATH_SEPARATOR/usr/xpg4/bin do IFS=$as_save_IFS test -z "$as_dir" && as_dir=. 
- for ac_prog in grep ggrep; do + for ac_prog in grep ggrep; do for ac_exec_ext in '' $ac_executable_extensions; do ac_path_GREP="$as_dir/$ac_prog$ac_exec_ext" { test -f "$ac_path_GREP" && $as_test_x "$ac_path_GREP"; } || continue @@ -3729,7 +3864,7 @@ case `"$ac_path_GREP" --version 2>&1` in $as_echo 'GREP' >> "conftest.nl" "$ac_path_GREP" -e 'GREP$' -e '-(cannot match)-' < "conftest.nl" >"conftest.out" 2>/dev/null || break diff "conftest.out" "conftest.nl" >/dev/null 2>&1 || break - as_fn_arith $ac_count + 1 && ac_count=$as_val + ac_count=`expr $ac_count + 1` if test $ac_count -gt ${ac_path_GREP_max-0}; then # Best one so far, save it but keep looking for a better one ac_cv_path_GREP="$ac_path_GREP" @@ -3744,24 +3879,26 @@ esac $ac_path_GREP_found && break 3 done done - done +done IFS=$as_save_IFS if test -z "$ac_cv_path_GREP"; then - as_fn_error "no acceptable grep could be found in $PATH$PATH_SEPARATOR/usr/xpg4/bin" "$LINENO" 5 + { { $as_echo "$as_me:$LINENO: error: no acceptable grep could be found in $PATH$PATH_SEPARATOR/usr/xpg4/bin" >&5 +$as_echo "$as_me: error: no acceptable grep could be found in $PATH$PATH_SEPARATOR/usr/xpg4/bin" >&2;} + { (exit 1); exit 1; }; } fi else ac_cv_path_GREP=$GREP fi fi -{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_path_GREP" >&5 +{ $as_echo "$as_me:$LINENO: result: $ac_cv_path_GREP" >&5 $as_echo "$ac_cv_path_GREP" >&6; } GREP="$ac_cv_path_GREP" -{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for egrep" >&5 +{ $as_echo "$as_me:$LINENO: checking for egrep" >&5 $as_echo_n "checking for egrep... " >&6; } -if test "${ac_cv_path_EGREP+set}" = set; then : +if test "${ac_cv_path_EGREP+set}" = set; then $as_echo_n "(cached) " >&6 else if echo a | $GREP -E '(a|b)' >/dev/null 2>&1 @@ -3775,7 +3912,7 @@ for as_dir in $PATH$PATH_SEPARATOR/usr/xpg4/bin do IFS=$as_save_IFS test -z "$as_dir" && as_dir=. - for ac_prog in egrep; do + for ac_prog in egrep; do for ac_exec_ext in '' $ac_executable_extensions; do ac_path_EGREP="$as_dir/$ac_prog$ac_exec_ext" { test -f "$ac_path_EGREP" && $as_test_x "$ac_path_EGREP"; } || continue @@ -3795,7 +3932,7 @@ case `"$ac_path_EGREP" --version 2>&1` in $as_echo 'EGREP' >> "conftest.nl" "$ac_path_EGREP" 'EGREP$' < "conftest.nl" >"conftest.out" 2>/dev/null || break diff "conftest.out" "conftest.nl" >/dev/null 2>&1 || break - as_fn_arith $ac_count + 1 && ac_count=$as_val + ac_count=`expr $ac_count + 1` if test $ac_count -gt ${ac_path_EGREP_max-0}; then # Best one so far, save it but keep looking for a better one ac_cv_path_EGREP="$ac_path_EGREP" @@ -3810,10 +3947,12 @@ esac $ac_path_EGREP_found && break 3 done done - done +done IFS=$as_save_IFS if test -z "$ac_cv_path_EGREP"; then - as_fn_error "no acceptable egrep could be found in $PATH$PATH_SEPARATOR/usr/xpg4/bin" "$LINENO" 5 + { { $as_echo "$as_me:$LINENO: error: no acceptable egrep could be found in $PATH$PATH_SEPARATOR/usr/xpg4/bin" >&5 +$as_echo "$as_me: error: no acceptable egrep could be found in $PATH$PATH_SEPARATOR/usr/xpg4/bin" >&2;} + { (exit 1); exit 1; }; } fi else ac_cv_path_EGREP=$EGREP @@ -3821,17 +3960,21 @@ fi fi fi -{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_path_EGREP" >&5 +{ $as_echo "$as_me:$LINENO: result: $ac_cv_path_EGREP" >&5 $as_echo "$ac_cv_path_EGREP" >&6; } EGREP="$ac_cv_path_EGREP" -{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for ANSI C header files" >&5 +{ $as_echo "$as_me:$LINENO: checking for ANSI C header files" >&5 $as_echo_n "checking for ANSI C header files... 
" >&6; } -if test "${ac_cv_header_stdc+set}" = set; then : +if test "${ac_cv_header_stdc+set}" = set; then $as_echo_n "(cached) " >&6 else - cat confdefs.h - <<_ACEOF >conftest.$ac_ext + cat >conftest.$ac_ext <<_ACEOF +/* confdefs.h. */ +_ACEOF +cat confdefs.h >>conftest.$ac_ext +cat >>conftest.$ac_ext <<_ACEOF /* end confdefs.h. */ #include #include @@ -3846,23 +3989,48 @@ main () return 0; } _ACEOF -if ac_fn_c_try_compile "$LINENO"; then : +rm -f conftest.$ac_objext +if { (ac_try="$ac_compile" +case "(($ac_try" in + *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; + *) ac_try_echo=$ac_try;; +esac +eval ac_try_echo="\"\$as_me:$LINENO: $ac_try_echo\"" +$as_echo "$ac_try_echo") >&5 + (eval "$ac_compile") 2>conftest.er1 + ac_status=$? + grep -v '^ *+' conftest.er1 >conftest.err + rm -f conftest.er1 + cat conftest.err >&5 + $as_echo "$as_me:$LINENO: \$? = $ac_status" >&5 + (exit $ac_status); } && { + test -z "$ac_c_werror_flag" || + test ! -s conftest.err + } && test -s conftest.$ac_objext; then ac_cv_header_stdc=yes else - ac_cv_header_stdc=no + $as_echo "$as_me: failed program was:" >&5 +sed 's/^/| /' conftest.$ac_ext >&5 + + ac_cv_header_stdc=no fi + rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext if test $ac_cv_header_stdc = yes; then # SunOS 4.x string.h does not declare mem*, contrary to ANSI. - cat confdefs.h - <<_ACEOF >conftest.$ac_ext + cat >conftest.$ac_ext <<_ACEOF +/* confdefs.h. */ +_ACEOF +cat confdefs.h >>conftest.$ac_ext +cat >>conftest.$ac_ext <<_ACEOF /* end confdefs.h. */ #include _ACEOF if (eval "$ac_cpp conftest.$ac_ext") 2>&5 | - $EGREP "memchr" >/dev/null 2>&1; then : - + $EGREP "memchr" >/dev/null 2>&1; then + : else ac_cv_header_stdc=no fi @@ -3872,14 +4040,18 @@ fi if test $ac_cv_header_stdc = yes; then # ISC 2.0.2 stdlib.h does not declare free, contrary to ANSI. - cat confdefs.h - <<_ACEOF >conftest.$ac_ext + cat >conftest.$ac_ext <<_ACEOF +/* confdefs.h. */ +_ACEOF +cat confdefs.h >>conftest.$ac_ext +cat >>conftest.$ac_ext <<_ACEOF /* end confdefs.h. */ #include _ACEOF if (eval "$ac_cpp conftest.$ac_ext") 2>&5 | - $EGREP "free" >/dev/null 2>&1; then : - + $EGREP "free" >/dev/null 2>&1; then + : else ac_cv_header_stdc=no fi @@ -3889,10 +4061,14 @@ fi if test $ac_cv_header_stdc = yes; then # /bin/cc in Irix-4.0.5 gets non-ANSI ctype macros unless using -ansi. - if test "$cross_compiling" = yes; then : + if test "$cross_compiling" = yes; then : else - cat confdefs.h - <<_ACEOF >conftest.$ac_ext + cat >conftest.$ac_ext <<_ACEOF +/* confdefs.h. */ +_ACEOF +cat confdefs.h >>conftest.$ac_ext +cat >>conftest.$ac_ext <<_ACEOF /* end confdefs.h. */ #include #include @@ -3919,118 +4095,369 @@ main () return 0; } _ACEOF -if ac_fn_c_try_run "$LINENO"; then : - +rm -f conftest$ac_exeext +if { (ac_try="$ac_link" +case "(($ac_try" in + *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; + *) ac_try_echo=$ac_try;; +esac +eval ac_try_echo="\"\$as_me:$LINENO: $ac_try_echo\"" +$as_echo "$ac_try_echo") >&5 + (eval "$ac_link") 2>&5 + ac_status=$? + $as_echo "$as_me:$LINENO: \$? = $ac_status" >&5 + (exit $ac_status); } && { ac_try='./conftest$ac_exeext' + { (case "(($ac_try" in + *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; + *) ac_try_echo=$ac_try;; +esac +eval ac_try_echo="\"\$as_me:$LINENO: $ac_try_echo\"" +$as_echo "$ac_try_echo") >&5 + (eval "$ac_try") 2>&5 + ac_status=$? + $as_echo "$as_me:$LINENO: \$? 
= $ac_status" >&5 + (exit $ac_status); }; }; then + : else - ac_cv_header_stdc=no + $as_echo "$as_me: program exited with status $ac_status" >&5 +$as_echo "$as_me: failed program was:" >&5 +sed 's/^/| /' conftest.$ac_ext >&5 + +( exit $ac_status ) +ac_cv_header_stdc=no fi -rm -f core *.core core.conftest.* gmon.out bb.out conftest$ac_exeext \ - conftest.$ac_objext conftest.beam conftest.$ac_ext +rm -rf conftest.dSYM +rm -f core *.core core.conftest.* gmon.out bb.out conftest$ac_exeext conftest.$ac_objext conftest.$ac_ext fi + fi fi -{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_header_stdc" >&5 +{ $as_echo "$as_me:$LINENO: result: $ac_cv_header_stdc" >&5 $as_echo "$ac_cv_header_stdc" >&6; } if test $ac_cv_header_stdc = yes; then -$as_echo "#define STDC_HEADERS 1" >>confdefs.h - -fi - -# On IRIX 5.3, sys/types and inttypes.h are conflicting. -for ac_header in sys/types.h sys/stat.h stdlib.h string.h memory.h strings.h \ - inttypes.h stdint.h unistd.h -do : - as_ac_Header=`$as_echo "ac_cv_header_$ac_header" | $as_tr_sh` -ac_fn_c_check_header_compile "$LINENO" "$ac_header" "$as_ac_Header" "$ac_includes_default -" -eval as_val=\$$as_ac_Header - if test "x$as_val" = x""yes; then : - cat >>confdefs.h <<_ACEOF -#define `$as_echo "HAVE_$ac_header" | $as_tr_cpp` 1 +cat >>confdefs.h <<\_ACEOF +#define STDC_HEADERS 1 _ACEOF fi -done +# On IRIX 5.3, sys/types and inttypes.h are conflicting. -for ac_header in pcap.h -do : - ac_fn_c_check_header_mongrel "$LINENO" "pcap.h" "ac_cv_header_pcap_h" "$ac_includes_default" -if test "x$ac_cv_header_pcap_h" = x""yes; then : - cat >>confdefs.h <<_ACEOF -#define HAVE_PCAP_H 1 -_ACEOF -fi -done -PCAPLIBS=$LIBS -LIBS=$SAVELIBS -################################################# -################################################# -# Checks for header files. -{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for ANSI C header files" >&5 -$as_echo_n "checking for ANSI C header files... " >&6; } -if test "${ac_cv_header_stdc+set}" = set; then : +for ac_header in sys/types.h sys/stat.h stdlib.h string.h memory.h strings.h \ + inttypes.h stdint.h unistd.h +do +as_ac_Header=`$as_echo "ac_cv_header_$ac_header" | $as_tr_sh` +{ $as_echo "$as_me:$LINENO: checking for $ac_header" >&5 +$as_echo_n "checking for $ac_header... " >&6; } +if { as_var=$as_ac_Header; eval "test \"\${$as_var+set}\" = set"; }; then $as_echo_n "(cached) " >&6 else - cat confdefs.h - <<_ACEOF >conftest.$ac_ext + cat >conftest.$ac_ext <<_ACEOF +/* confdefs.h. */ +_ACEOF +cat confdefs.h >>conftest.$ac_ext +cat >>conftest.$ac_ext <<_ACEOF /* end confdefs.h. */ -#include -#include -#include -#include - -int -main () -{ +$ac_includes_default - ; - return 0; -} +#include <$ac_header> _ACEOF -if ac_fn_c_try_compile "$LINENO"; then : - ac_cv_header_stdc=yes +rm -f conftest.$ac_objext +if { (ac_try="$ac_compile" +case "(($ac_try" in + *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; + *) ac_try_echo=$ac_try;; +esac +eval ac_try_echo="\"\$as_me:$LINENO: $ac_try_echo\"" +$as_echo "$ac_try_echo") >&5 + (eval "$ac_compile") 2>conftest.er1 + ac_status=$? + grep -v '^ *+' conftest.er1 >conftest.err + rm -f conftest.er1 + cat conftest.err >&5 + $as_echo "$as_me:$LINENO: \$? = $ac_status" >&5 + (exit $ac_status); } && { + test -z "$ac_c_werror_flag" || + test ! 
-s conftest.err + } && test -s conftest.$ac_objext; then + eval "$as_ac_Header=yes" else - ac_cv_header_stdc=no -fi -rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext + $as_echo "$as_me: failed program was:" >&5 +sed 's/^/| /' conftest.$ac_ext >&5 -if test $ac_cv_header_stdc = yes; then - # SunOS 4.x string.h does not declare mem*, contrary to ANSI. - cat confdefs.h - <<_ACEOF >conftest.$ac_ext -/* end confdefs.h. */ -#include + eval "$as_ac_Header=no" +fi +rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext +fi +ac_res=`eval 'as_val=${'$as_ac_Header'} + $as_echo "$as_val"'` + { $as_echo "$as_me:$LINENO: result: $ac_res" >&5 +$as_echo "$ac_res" >&6; } +as_val=`eval 'as_val=${'$as_ac_Header'} + $as_echo "$as_val"'` + if test "x$as_val" = x""yes; then + cat >>confdefs.h <<_ACEOF +#define `$as_echo "HAVE_$ac_header" | $as_tr_cpp` 1 _ACEOF -if (eval "$ac_cpp conftest.$ac_ext") 2>&5 | - $EGREP "memchr" >/dev/null 2>&1; then : -else - ac_cv_header_stdc=no fi -rm -f conftest* -fi +done + + + +for ac_header in pcap.h +do +as_ac_Header=`$as_echo "ac_cv_header_$ac_header" | $as_tr_sh` +if { as_var=$as_ac_Header; eval "test \"\${$as_var+set}\" = set"; }; then + { $as_echo "$as_me:$LINENO: checking for $ac_header" >&5 +$as_echo_n "checking for $ac_header... " >&6; } +if { as_var=$as_ac_Header; eval "test \"\${$as_var+set}\" = set"; }; then + $as_echo_n "(cached) " >&6 +fi +ac_res=`eval 'as_val=${'$as_ac_Header'} + $as_echo "$as_val"'` + { $as_echo "$as_me:$LINENO: result: $ac_res" >&5 +$as_echo "$ac_res" >&6; } +else + # Is the header compilable? +{ $as_echo "$as_me:$LINENO: checking $ac_header usability" >&5 +$as_echo_n "checking $ac_header usability... " >&6; } +cat >conftest.$ac_ext <<_ACEOF +/* confdefs.h. */ +_ACEOF +cat confdefs.h >>conftest.$ac_ext +cat >>conftest.$ac_ext <<_ACEOF +/* end confdefs.h. */ +$ac_includes_default +#include <$ac_header> +_ACEOF +rm -f conftest.$ac_objext +if { (ac_try="$ac_compile" +case "(($ac_try" in + *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; + *) ac_try_echo=$ac_try;; +esac +eval ac_try_echo="\"\$as_me:$LINENO: $ac_try_echo\"" +$as_echo "$ac_try_echo") >&5 + (eval "$ac_compile") 2>conftest.er1 + ac_status=$? + grep -v '^ *+' conftest.er1 >conftest.err + rm -f conftest.er1 + cat conftest.err >&5 + $as_echo "$as_me:$LINENO: \$? = $ac_status" >&5 + (exit $ac_status); } && { + test -z "$ac_c_werror_flag" || + test ! -s conftest.err + } && test -s conftest.$ac_objext; then + ac_header_compiler=yes +else + $as_echo "$as_me: failed program was:" >&5 +sed 's/^/| /' conftest.$ac_ext >&5 + + ac_header_compiler=no +fi + +rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext +{ $as_echo "$as_me:$LINENO: result: $ac_header_compiler" >&5 +$as_echo "$ac_header_compiler" >&6; } + +# Is the header present? +{ $as_echo "$as_me:$LINENO: checking $ac_header presence" >&5 +$as_echo_n "checking $ac_header presence... " >&6; } +cat >conftest.$ac_ext <<_ACEOF +/* confdefs.h. */ +_ACEOF +cat confdefs.h >>conftest.$ac_ext +cat >>conftest.$ac_ext <<_ACEOF +/* end confdefs.h. */ +#include <$ac_header> +_ACEOF +if { (ac_try="$ac_cpp conftest.$ac_ext" +case "(($ac_try" in + *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; + *) ac_try_echo=$ac_try;; +esac +eval ac_try_echo="\"\$as_me:$LINENO: $ac_try_echo\"" +$as_echo "$ac_try_echo") >&5 + (eval "$ac_cpp conftest.$ac_ext") 2>conftest.er1 + ac_status=$? + grep -v '^ *+' conftest.er1 >conftest.err + rm -f conftest.er1 + cat conftest.err >&5 + $as_echo "$as_me:$LINENO: \$? 
= $ac_status" >&5 + (exit $ac_status); } >/dev/null && { + test -z "$ac_c_preproc_warn_flag$ac_c_werror_flag" || + test ! -s conftest.err + }; then + ac_header_preproc=yes +else + $as_echo "$as_me: failed program was:" >&5 +sed 's/^/| /' conftest.$ac_ext >&5 + + ac_header_preproc=no +fi + +rm -f conftest.err conftest.$ac_ext +{ $as_echo "$as_me:$LINENO: result: $ac_header_preproc" >&5 +$as_echo "$ac_header_preproc" >&6; } + +# So? What about this header? +case $ac_header_compiler:$ac_header_preproc:$ac_c_preproc_warn_flag in + yes:no: ) + { $as_echo "$as_me:$LINENO: WARNING: $ac_header: accepted by the compiler, rejected by the preprocessor!" >&5 +$as_echo "$as_me: WARNING: $ac_header: accepted by the compiler, rejected by the preprocessor!" >&2;} + { $as_echo "$as_me:$LINENO: WARNING: $ac_header: proceeding with the compiler's result" >&5 +$as_echo "$as_me: WARNING: $ac_header: proceeding with the compiler's result" >&2;} + ac_header_preproc=yes + ;; + no:yes:* ) + { $as_echo "$as_me:$LINENO: WARNING: $ac_header: present but cannot be compiled" >&5 +$as_echo "$as_me: WARNING: $ac_header: present but cannot be compiled" >&2;} + { $as_echo "$as_me:$LINENO: WARNING: $ac_header: check for missing prerequisite headers?" >&5 +$as_echo "$as_me: WARNING: $ac_header: check for missing prerequisite headers?" >&2;} + { $as_echo "$as_me:$LINENO: WARNING: $ac_header: see the Autoconf documentation" >&5 +$as_echo "$as_me: WARNING: $ac_header: see the Autoconf documentation" >&2;} + { $as_echo "$as_me:$LINENO: WARNING: $ac_header: section \"Present But Cannot Be Compiled\"" >&5 +$as_echo "$as_me: WARNING: $ac_header: section \"Present But Cannot Be Compiled\"" >&2;} + { $as_echo "$as_me:$LINENO: WARNING: $ac_header: proceeding with the preprocessor's result" >&5 +$as_echo "$as_me: WARNING: $ac_header: proceeding with the preprocessor's result" >&2;} + { $as_echo "$as_me:$LINENO: WARNING: $ac_header: in the future, the compiler will take precedence" >&5 +$as_echo "$as_me: WARNING: $ac_header: in the future, the compiler will take precedence" >&2;} + + ;; +esac +{ $as_echo "$as_me:$LINENO: checking for $ac_header" >&5 +$as_echo_n "checking for $ac_header... " >&6; } +if { as_var=$as_ac_Header; eval "test \"\${$as_var+set}\" = set"; }; then + $as_echo_n "(cached) " >&6 +else + eval "$as_ac_Header=\$ac_header_preproc" +fi +ac_res=`eval 'as_val=${'$as_ac_Header'} + $as_echo "$as_val"'` + { $as_echo "$as_me:$LINENO: result: $ac_res" >&5 +$as_echo "$ac_res" >&6; } + +fi +as_val=`eval 'as_val=${'$as_ac_Header'} + $as_echo "$as_val"'` + if test "x$as_val" = x""yes; then + cat >>confdefs.h <<_ACEOF +#define `$as_echo "HAVE_$ac_header" | $as_tr_cpp` 1 +_ACEOF + +fi + +done + +PCAPLIBS=$LIBS +LIBS=$SAVELIBS + + + +################################################# +################################################# + +# Checks for header files. +{ $as_echo "$as_me:$LINENO: checking for ANSI C header files" >&5 +$as_echo_n "checking for ANSI C header files... " >&6; } +if test "${ac_cv_header_stdc+set}" = set; then + $as_echo_n "(cached) " >&6 +else + cat >conftest.$ac_ext <<_ACEOF +/* confdefs.h. */ +_ACEOF +cat confdefs.h >>conftest.$ac_ext +cat >>conftest.$ac_ext <<_ACEOF +/* end confdefs.h. 
*/ +#include +#include +#include +#include + +int +main () +{ + + ; + return 0; +} +_ACEOF +rm -f conftest.$ac_objext +if { (ac_try="$ac_compile" +case "(($ac_try" in + *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; + *) ac_try_echo=$ac_try;; +esac +eval ac_try_echo="\"\$as_me:$LINENO: $ac_try_echo\"" +$as_echo "$ac_try_echo") >&5 + (eval "$ac_compile") 2>conftest.er1 + ac_status=$? + grep -v '^ *+' conftest.er1 >conftest.err + rm -f conftest.er1 + cat conftest.err >&5 + $as_echo "$as_me:$LINENO: \$? = $ac_status" >&5 + (exit $ac_status); } && { + test -z "$ac_c_werror_flag" || + test ! -s conftest.err + } && test -s conftest.$ac_objext; then + ac_cv_header_stdc=yes +else + $as_echo "$as_me: failed program was:" >&5 +sed 's/^/| /' conftest.$ac_ext >&5 + + ac_cv_header_stdc=no +fi + +rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext + +if test $ac_cv_header_stdc = yes; then + # SunOS 4.x string.h does not declare mem*, contrary to ANSI. + cat >conftest.$ac_ext <<_ACEOF +/* confdefs.h. */ +_ACEOF +cat confdefs.h >>conftest.$ac_ext +cat >>conftest.$ac_ext <<_ACEOF +/* end confdefs.h. */ +#include + +_ACEOF +if (eval "$ac_cpp conftest.$ac_ext") 2>&5 | + $EGREP "memchr" >/dev/null 2>&1; then + : +else + ac_cv_header_stdc=no +fi +rm -f conftest* + +fi if test $ac_cv_header_stdc = yes; then # ISC 2.0.2 stdlib.h does not declare free, contrary to ANSI. - cat confdefs.h - <<_ACEOF >conftest.$ac_ext + cat >conftest.$ac_ext <<_ACEOF +/* confdefs.h. */ +_ACEOF +cat confdefs.h >>conftest.$ac_ext +cat >>conftest.$ac_ext <<_ACEOF /* end confdefs.h. */ #include _ACEOF if (eval "$ac_cpp conftest.$ac_ext") 2>&5 | - $EGREP "free" >/dev/null 2>&1; then : - + $EGREP "free" >/dev/null 2>&1; then + : else ac_cv_header_stdc=no fi @@ -4040,10 +4467,14 @@ fi if test $ac_cv_header_stdc = yes; then # /bin/cc in Irix-4.0.5 gets non-ANSI ctype macros unless using -ansi. - if test "$cross_compiling" = yes; then : + if test "$cross_compiling" = yes; then : else - cat confdefs.h - <<_ACEOF >conftest.$ac_ext + cat >conftest.$ac_ext <<_ACEOF +/* confdefs.h. */ +_ACEOF +cat confdefs.h >>conftest.$ac_ext +cat >>conftest.$ac_ext <<_ACEOF /* end confdefs.h. */ #include #include @@ -4070,31 +4501,204 @@ main () return 0; } _ACEOF -if ac_fn_c_try_run "$LINENO"; then : - +rm -f conftest$ac_exeext +if { (ac_try="$ac_link" +case "(($ac_try" in + *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; + *) ac_try_echo=$ac_try;; +esac +eval ac_try_echo="\"\$as_me:$LINENO: $ac_try_echo\"" +$as_echo "$ac_try_echo") >&5 + (eval "$ac_link") 2>&5 + ac_status=$? + $as_echo "$as_me:$LINENO: \$? = $ac_status" >&5 + (exit $ac_status); } && { ac_try='./conftest$ac_exeext' + { (case "(($ac_try" in + *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; + *) ac_try_echo=$ac_try;; +esac +eval ac_try_echo="\"\$as_me:$LINENO: $ac_try_echo\"" +$as_echo "$ac_try_echo") >&5 + (eval "$ac_try") 2>&5 + ac_status=$? + $as_echo "$as_me:$LINENO: \$? 
= $ac_status" >&5 + (exit $ac_status); }; }; then + : else - ac_cv_header_stdc=no + $as_echo "$as_me: program exited with status $ac_status" >&5 +$as_echo "$as_me: failed program was:" >&5 +sed 's/^/| /' conftest.$ac_ext >&5 + +( exit $ac_status ) +ac_cv_header_stdc=no fi -rm -f core *.core core.conftest.* gmon.out bb.out conftest$ac_exeext \ - conftest.$ac_objext conftest.beam conftest.$ac_ext +rm -rf conftest.dSYM +rm -f core *.core core.conftest.* gmon.out bb.out conftest$ac_exeext conftest.$ac_objext conftest.$ac_ext fi + fi fi -{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_header_stdc" >&5 +{ $as_echo "$as_me:$LINENO: result: $ac_cv_header_stdc" >&5 $as_echo "$ac_cv_header_stdc" >&6; } if test $ac_cv_header_stdc = yes; then -$as_echo "#define STDC_HEADERS 1" >>confdefs.h +cat >>confdefs.h <<\_ACEOF +#define STDC_HEADERS 1 +_ACEOF fi + + + + + + + + + + + + + for ac_header in arpa/inet.h fcntl.h inttypes.h limits.h netdb.h netinet/in.h stdlib.h string.h sys/socket.h sys/time.h unistd.h getopt.h libgen.h -do : - as_ac_Header=`$as_echo "ac_cv_header_$ac_header" | $as_tr_sh` -ac_fn_c_check_header_mongrel "$LINENO" "$ac_header" "$as_ac_Header" "$ac_includes_default" -eval as_val=\$$as_ac_Header - if test "x$as_val" = x""yes; then : +do +as_ac_Header=`$as_echo "ac_cv_header_$ac_header" | $as_tr_sh` +if { as_var=$as_ac_Header; eval "test \"\${$as_var+set}\" = set"; }; then + { $as_echo "$as_me:$LINENO: checking for $ac_header" >&5 +$as_echo_n "checking for $ac_header... " >&6; } +if { as_var=$as_ac_Header; eval "test \"\${$as_var+set}\" = set"; }; then + $as_echo_n "(cached) " >&6 +fi +ac_res=`eval 'as_val=${'$as_ac_Header'} + $as_echo "$as_val"'` + { $as_echo "$as_me:$LINENO: result: $ac_res" >&5 +$as_echo "$ac_res" >&6; } +else + # Is the header compilable? +{ $as_echo "$as_me:$LINENO: checking $ac_header usability" >&5 +$as_echo_n "checking $ac_header usability... " >&6; } +cat >conftest.$ac_ext <<_ACEOF +/* confdefs.h. */ +_ACEOF +cat confdefs.h >>conftest.$ac_ext +cat >>conftest.$ac_ext <<_ACEOF +/* end confdefs.h. */ +$ac_includes_default +#include <$ac_header> +_ACEOF +rm -f conftest.$ac_objext +if { (ac_try="$ac_compile" +case "(($ac_try" in + *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; + *) ac_try_echo=$ac_try;; +esac +eval ac_try_echo="\"\$as_me:$LINENO: $ac_try_echo\"" +$as_echo "$ac_try_echo") >&5 + (eval "$ac_compile") 2>conftest.er1 + ac_status=$? + grep -v '^ *+' conftest.er1 >conftest.err + rm -f conftest.er1 + cat conftest.err >&5 + $as_echo "$as_me:$LINENO: \$? = $ac_status" >&5 + (exit $ac_status); } && { + test -z "$ac_c_werror_flag" || + test ! -s conftest.err + } && test -s conftest.$ac_objext; then + ac_header_compiler=yes +else + $as_echo "$as_me: failed program was:" >&5 +sed 's/^/| /' conftest.$ac_ext >&5 + + ac_header_compiler=no +fi + +rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext +{ $as_echo "$as_me:$LINENO: result: $ac_header_compiler" >&5 +$as_echo "$ac_header_compiler" >&6; } + +# Is the header present? +{ $as_echo "$as_me:$LINENO: checking $ac_header presence" >&5 +$as_echo_n "checking $ac_header presence... " >&6; } +cat >conftest.$ac_ext <<_ACEOF +/* confdefs.h. */ +_ACEOF +cat confdefs.h >>conftest.$ac_ext +cat >>conftest.$ac_ext <<_ACEOF +/* end confdefs.h. 
*/ +#include <$ac_header> +_ACEOF +if { (ac_try="$ac_cpp conftest.$ac_ext" +case "(($ac_try" in + *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; + *) ac_try_echo=$ac_try;; +esac +eval ac_try_echo="\"\$as_me:$LINENO: $ac_try_echo\"" +$as_echo "$ac_try_echo") >&5 + (eval "$ac_cpp conftest.$ac_ext") 2>conftest.er1 + ac_status=$? + grep -v '^ *+' conftest.er1 >conftest.err + rm -f conftest.er1 + cat conftest.err >&5 + $as_echo "$as_me:$LINENO: \$? = $ac_status" >&5 + (exit $ac_status); } >/dev/null && { + test -z "$ac_c_preproc_warn_flag$ac_c_werror_flag" || + test ! -s conftest.err + }; then + ac_header_preproc=yes +else + $as_echo "$as_me: failed program was:" >&5 +sed 's/^/| /' conftest.$ac_ext >&5 + + ac_header_preproc=no +fi + +rm -f conftest.err conftest.$ac_ext +{ $as_echo "$as_me:$LINENO: result: $ac_header_preproc" >&5 +$as_echo "$ac_header_preproc" >&6; } + +# So? What about this header? +case $ac_header_compiler:$ac_header_preproc:$ac_c_preproc_warn_flag in + yes:no: ) + { $as_echo "$as_me:$LINENO: WARNING: $ac_header: accepted by the compiler, rejected by the preprocessor!" >&5 +$as_echo "$as_me: WARNING: $ac_header: accepted by the compiler, rejected by the preprocessor!" >&2;} + { $as_echo "$as_me:$LINENO: WARNING: $ac_header: proceeding with the compiler's result" >&5 +$as_echo "$as_me: WARNING: $ac_header: proceeding with the compiler's result" >&2;} + ac_header_preproc=yes + ;; + no:yes:* ) + { $as_echo "$as_me:$LINENO: WARNING: $ac_header: present but cannot be compiled" >&5 +$as_echo "$as_me: WARNING: $ac_header: present but cannot be compiled" >&2;} + { $as_echo "$as_me:$LINENO: WARNING: $ac_header: check for missing prerequisite headers?" >&5 +$as_echo "$as_me: WARNING: $ac_header: check for missing prerequisite headers?" >&2;} + { $as_echo "$as_me:$LINENO: WARNING: $ac_header: see the Autoconf documentation" >&5 +$as_echo "$as_me: WARNING: $ac_header: see the Autoconf documentation" >&2;} + { $as_echo "$as_me:$LINENO: WARNING: $ac_header: section \"Present But Cannot Be Compiled\"" >&5 +$as_echo "$as_me: WARNING: $ac_header: section \"Present But Cannot Be Compiled\"" >&2;} + { $as_echo "$as_me:$LINENO: WARNING: $ac_header: proceeding with the preprocessor's result" >&5 +$as_echo "$as_me: WARNING: $ac_header: proceeding with the preprocessor's result" >&2;} + { $as_echo "$as_me:$LINENO: WARNING: $ac_header: in the future, the compiler will take precedence" >&5 +$as_echo "$as_me: WARNING: $ac_header: in the future, the compiler will take precedence" >&2;} + + ;; +esac +{ $as_echo "$as_me:$LINENO: checking for $ac_header" >&5 +$as_echo_n "checking for $ac_header... " >&6; } +if { as_var=$as_ac_Header; eval "test \"\${$as_var+set}\" = set"; }; then + $as_echo_n "(cached) " >&6 +else + eval "$as_ac_Header=\$ac_header_preproc" +fi +ac_res=`eval 'as_val=${'$as_ac_Header'} + $as_echo "$as_val"'` + { $as_echo "$as_me:$LINENO: result: $ac_res" >&5 +$as_echo "$ac_res" >&6; } + +fi +as_val=`eval 'as_val=${'$as_ac_Header'} + $as_echo "$as_val"'` + if test "x$as_val" = x""yes; then cat >>confdefs.h <<_ACEOF #define `$as_echo "HAVE_$ac_header" | $as_tr_cpp` 1 _ACEOF @@ -4105,12 +4709,16 @@ done # Checks for typedefs, structures, and compiler characteristics. -{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for an ANSI C-conforming const" >&5 +{ $as_echo "$as_me:$LINENO: checking for an ANSI C-conforming const" >&5 $as_echo_n "checking for an ANSI C-conforming const... 
" >&6; } -if test "${ac_cv_c_const+set}" = set; then : +if test "${ac_cv_c_const+set}" = set; then $as_echo_n "(cached) " >&6 else - cat confdefs.h - <<_ACEOF >conftest.$ac_ext + cat >conftest.$ac_ext <<_ACEOF +/* confdefs.h. */ +_ACEOF +cat confdefs.h >>conftest.$ac_ext +cat >>conftest.$ac_ext <<_ACEOF /* end confdefs.h. */ int @@ -4170,41 +4778,161 @@ main () return 0; } _ACEOF -if ac_fn_c_try_compile "$LINENO"; then : +rm -f conftest.$ac_objext +if { (ac_try="$ac_compile" +case "(($ac_try" in + *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; + *) ac_try_echo=$ac_try;; +esac +eval ac_try_echo="\"\$as_me:$LINENO: $ac_try_echo\"" +$as_echo "$ac_try_echo") >&5 + (eval "$ac_compile") 2>conftest.er1 + ac_status=$? + grep -v '^ *+' conftest.er1 >conftest.err + rm -f conftest.er1 + cat conftest.err >&5 + $as_echo "$as_me:$LINENO: \$? = $ac_status" >&5 + (exit $ac_status); } && { + test -z "$ac_c_werror_flag" || + test ! -s conftest.err + } && test -s conftest.$ac_objext; then ac_cv_c_const=yes else - ac_cv_c_const=no + $as_echo "$as_me: failed program was:" >&5 +sed 's/^/| /' conftest.$ac_ext >&5 + + ac_cv_c_const=no fi + rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext fi -{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_c_const" >&5 +{ $as_echo "$as_me:$LINENO: result: $ac_cv_c_const" >&5 $as_echo "$ac_cv_c_const" >&6; } if test $ac_cv_c_const = no; then -$as_echo "#define const /**/" >>confdefs.h - -fi - -ac_fn_c_check_type "$LINENO" "size_t" "ac_cv_type_size_t" "$ac_includes_default" -if test "x$ac_cv_type_size_t" = x""yes; then : - -else - -cat >>confdefs.h <<_ACEOF -#define size_t unsigned int +cat >>confdefs.h <<\_ACEOF +#define const /**/ _ACEOF fi -{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether time.h and sys/time.h may both be included" >&5 -$as_echo_n "checking whether time.h and sys/time.h may both be included... " >&6; } -if test "${ac_cv_header_time+set}" = set; then : +{ $as_echo "$as_me:$LINENO: checking for size_t" >&5 +$as_echo_n "checking for size_t... " >&6; } +if test "${ac_cv_type_size_t+set}" = set; then $as_echo_n "(cached) " >&6 else - cat confdefs.h - <<_ACEOF >conftest.$ac_ext + ac_cv_type_size_t=no +cat >conftest.$ac_ext <<_ACEOF +/* confdefs.h. */ +_ACEOF +cat confdefs.h >>conftest.$ac_ext +cat >>conftest.$ac_ext <<_ACEOF /* end confdefs.h. */ -#include -#include +$ac_includes_default +int +main () +{ +if (sizeof (size_t)) + return 0; + ; + return 0; +} +_ACEOF +rm -f conftest.$ac_objext +if { (ac_try="$ac_compile" +case "(($ac_try" in + *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; + *) ac_try_echo=$ac_try;; +esac +eval ac_try_echo="\"\$as_me:$LINENO: $ac_try_echo\"" +$as_echo "$ac_try_echo") >&5 + (eval "$ac_compile") 2>conftest.er1 + ac_status=$? + grep -v '^ *+' conftest.er1 >conftest.err + rm -f conftest.er1 + cat conftest.err >&5 + $as_echo "$as_me:$LINENO: \$? = $ac_status" >&5 + (exit $ac_status); } && { + test -z "$ac_c_werror_flag" || + test ! -s conftest.err + } && test -s conftest.$ac_objext; then + cat >conftest.$ac_ext <<_ACEOF +/* confdefs.h. */ +_ACEOF +cat confdefs.h >>conftest.$ac_ext +cat >>conftest.$ac_ext <<_ACEOF +/* end confdefs.h. */ +$ac_includes_default +int +main () +{ +if (sizeof ((size_t))) + return 0; + ; + return 0; +} +_ACEOF +rm -f conftest.$ac_objext +if { (ac_try="$ac_compile" +case "(($ac_try" in + *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; + *) ac_try_echo=$ac_try;; +esac +eval ac_try_echo="\"\$as_me:$LINENO: $ac_try_echo\"" +$as_echo "$ac_try_echo") >&5 + (eval "$ac_compile") 2>conftest.er1 + ac_status=$? 
+ grep -v '^ *+' conftest.er1 >conftest.err + rm -f conftest.er1 + cat conftest.err >&5 + $as_echo "$as_me:$LINENO: \$? = $ac_status" >&5 + (exit $ac_status); } && { + test -z "$ac_c_werror_flag" || + test ! -s conftest.err + } && test -s conftest.$ac_objext; then + : +else + $as_echo "$as_me: failed program was:" >&5 +sed 's/^/| /' conftest.$ac_ext >&5 + + ac_cv_type_size_t=yes +fi + +rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext +else + $as_echo "$as_me: failed program was:" >&5 +sed 's/^/| /' conftest.$ac_ext >&5 + + +fi + +rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext +fi +{ $as_echo "$as_me:$LINENO: result: $ac_cv_type_size_t" >&5 +$as_echo "$ac_cv_type_size_t" >&6; } +if test "x$ac_cv_type_size_t" = x""yes; then + : +else + +cat >>confdefs.h <<_ACEOF +#define size_t unsigned int +_ACEOF + +fi + +{ $as_echo "$as_me:$LINENO: checking whether time.h and sys/time.h may both be included" >&5 +$as_echo_n "checking whether time.h and sys/time.h may both be included... " >&6; } +if test "${ac_cv_header_time+set}" = set; then + $as_echo_n "(cached) " >&6 +else + cat >conftest.$ac_ext <<_ACEOF +/* confdefs.h. */ +_ACEOF +cat confdefs.h >>conftest.$ac_ext +cat >>conftest.$ac_ext <<_ACEOF +/* end confdefs.h. */ +#include +#include #include int @@ -4216,27 +4944,54 @@ return 0; return 0; } _ACEOF -if ac_fn_c_try_compile "$LINENO"; then : +rm -f conftest.$ac_objext +if { (ac_try="$ac_compile" +case "(($ac_try" in + *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; + *) ac_try_echo=$ac_try;; +esac +eval ac_try_echo="\"\$as_me:$LINENO: $ac_try_echo\"" +$as_echo "$ac_try_echo") >&5 + (eval "$ac_compile") 2>conftest.er1 + ac_status=$? + grep -v '^ *+' conftest.er1 >conftest.err + rm -f conftest.er1 + cat conftest.err >&5 + $as_echo "$as_me:$LINENO: \$? = $ac_status" >&5 + (exit $ac_status); } && { + test -z "$ac_c_werror_flag" || + test ! -s conftest.err + } && test -s conftest.$ac_objext; then ac_cv_header_time=yes else - ac_cv_header_time=no + $as_echo "$as_me: failed program was:" >&5 +sed 's/^/| /' conftest.$ac_ext >&5 + + ac_cv_header_time=no fi + rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext fi -{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_header_time" >&5 +{ $as_echo "$as_me:$LINENO: result: $ac_cv_header_time" >&5 $as_echo "$ac_cv_header_time" >&6; } if test $ac_cv_header_time = yes; then -$as_echo "#define TIME_WITH_SYS_TIME 1" >>confdefs.h +cat >>confdefs.h <<\_ACEOF +#define TIME_WITH_SYS_TIME 1 +_ACEOF fi -{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for working volatile" >&5 +{ $as_echo "$as_me:$LINENO: checking for working volatile" >&5 $as_echo_n "checking for working volatile... " >&6; } -if test "${ac_cv_c_volatile+set}" = set; then : +if test "${ac_cv_c_volatile+set}" = set; then $as_echo_n "(cached) " >&6 else - cat confdefs.h - <<_ACEOF >conftest.$ac_ext + cat >conftest.$ac_ext <<_ACEOF +/* confdefs.h. */ +_ACEOF +cat confdefs.h >>conftest.$ac_ext +cat >>conftest.$ac_ext <<_ACEOF /* end confdefs.h. */ int @@ -4250,29 +5005,185 @@ return !x && !y; return 0; } _ACEOF -if ac_fn_c_try_compile "$LINENO"; then : +rm -f conftest.$ac_objext +if { (ac_try="$ac_compile" +case "(($ac_try" in + *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; + *) ac_try_echo=$ac_try;; +esac +eval ac_try_echo="\"\$as_me:$LINENO: $ac_try_echo\"" +$as_echo "$ac_try_echo") >&5 + (eval "$ac_compile") 2>conftest.er1 + ac_status=$? + grep -v '^ *+' conftest.er1 >conftest.err + rm -f conftest.er1 + cat conftest.err >&5 + $as_echo "$as_me:$LINENO: \$? 
= $ac_status" >&5 + (exit $ac_status); } && { + test -z "$ac_c_werror_flag" || + test ! -s conftest.err + } && test -s conftest.$ac_objext; then ac_cv_c_volatile=yes else - ac_cv_c_volatile=no + $as_echo "$as_me: failed program was:" >&5 +sed 's/^/| /' conftest.$ac_ext >&5 + + ac_cv_c_volatile=no fi + rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext fi -{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_c_volatile" >&5 +{ $as_echo "$as_me:$LINENO: result: $ac_cv_c_volatile" >&5 $as_echo "$ac_cv_c_volatile" >&6; } if test $ac_cv_c_volatile = no; then -$as_echo "#define volatile /**/" >>confdefs.h +cat >>confdefs.h <<\_ACEOF +#define volatile /**/ +_ACEOF fi # Checks for library functions. + + for ac_header in sys/select.h sys/socket.h -do : - as_ac_Header=`$as_echo "ac_cv_header_$ac_header" | $as_tr_sh` -ac_fn_c_check_header_mongrel "$LINENO" "$ac_header" "$as_ac_Header" "$ac_includes_default" -eval as_val=\$$as_ac_Header - if test "x$as_val" = x""yes; then : +do +as_ac_Header=`$as_echo "ac_cv_header_$ac_header" | $as_tr_sh` +if { as_var=$as_ac_Header; eval "test \"\${$as_var+set}\" = set"; }; then + { $as_echo "$as_me:$LINENO: checking for $ac_header" >&5 +$as_echo_n "checking for $ac_header... " >&6; } +if { as_var=$as_ac_Header; eval "test \"\${$as_var+set}\" = set"; }; then + $as_echo_n "(cached) " >&6 +fi +ac_res=`eval 'as_val=${'$as_ac_Header'} + $as_echo "$as_val"'` + { $as_echo "$as_me:$LINENO: result: $ac_res" >&5 +$as_echo "$ac_res" >&6; } +else + # Is the header compilable? +{ $as_echo "$as_me:$LINENO: checking $ac_header usability" >&5 +$as_echo_n "checking $ac_header usability... " >&6; } +cat >conftest.$ac_ext <<_ACEOF +/* confdefs.h. */ +_ACEOF +cat confdefs.h >>conftest.$ac_ext +cat >>conftest.$ac_ext <<_ACEOF +/* end confdefs.h. */ +$ac_includes_default +#include <$ac_header> +_ACEOF +rm -f conftest.$ac_objext +if { (ac_try="$ac_compile" +case "(($ac_try" in + *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; + *) ac_try_echo=$ac_try;; +esac +eval ac_try_echo="\"\$as_me:$LINENO: $ac_try_echo\"" +$as_echo "$ac_try_echo") >&5 + (eval "$ac_compile") 2>conftest.er1 + ac_status=$? + grep -v '^ *+' conftest.er1 >conftest.err + rm -f conftest.er1 + cat conftest.err >&5 + $as_echo "$as_me:$LINENO: \$? = $ac_status" >&5 + (exit $ac_status); } && { + test -z "$ac_c_werror_flag" || + test ! -s conftest.err + } && test -s conftest.$ac_objext; then + ac_header_compiler=yes +else + $as_echo "$as_me: failed program was:" >&5 +sed 's/^/| /' conftest.$ac_ext >&5 + + ac_header_compiler=no +fi + +rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext +{ $as_echo "$as_me:$LINENO: result: $ac_header_compiler" >&5 +$as_echo "$ac_header_compiler" >&6; } + +# Is the header present? +{ $as_echo "$as_me:$LINENO: checking $ac_header presence" >&5 +$as_echo_n "checking $ac_header presence... " >&6; } +cat >conftest.$ac_ext <<_ACEOF +/* confdefs.h. */ +_ACEOF +cat confdefs.h >>conftest.$ac_ext +cat >>conftest.$ac_ext <<_ACEOF +/* end confdefs.h. */ +#include <$ac_header> +_ACEOF +if { (ac_try="$ac_cpp conftest.$ac_ext" +case "(($ac_try" in + *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; + *) ac_try_echo=$ac_try;; +esac +eval ac_try_echo="\"\$as_me:$LINENO: $ac_try_echo\"" +$as_echo "$ac_try_echo") >&5 + (eval "$ac_cpp conftest.$ac_ext") 2>conftest.er1 + ac_status=$? + grep -v '^ *+' conftest.er1 >conftest.err + rm -f conftest.er1 + cat conftest.err >&5 + $as_echo "$as_me:$LINENO: \$? 
= $ac_status" >&5 + (exit $ac_status); } >/dev/null && { + test -z "$ac_c_preproc_warn_flag$ac_c_werror_flag" || + test ! -s conftest.err + }; then + ac_header_preproc=yes +else + $as_echo "$as_me: failed program was:" >&5 +sed 's/^/| /' conftest.$ac_ext >&5 + + ac_header_preproc=no +fi + +rm -f conftest.err conftest.$ac_ext +{ $as_echo "$as_me:$LINENO: result: $ac_header_preproc" >&5 +$as_echo "$ac_header_preproc" >&6; } + +# So? What about this header? +case $ac_header_compiler:$ac_header_preproc:$ac_c_preproc_warn_flag in + yes:no: ) + { $as_echo "$as_me:$LINENO: WARNING: $ac_header: accepted by the compiler, rejected by the preprocessor!" >&5 +$as_echo "$as_me: WARNING: $ac_header: accepted by the compiler, rejected by the preprocessor!" >&2;} + { $as_echo "$as_me:$LINENO: WARNING: $ac_header: proceeding with the compiler's result" >&5 +$as_echo "$as_me: WARNING: $ac_header: proceeding with the compiler's result" >&2;} + ac_header_preproc=yes + ;; + no:yes:* ) + { $as_echo "$as_me:$LINENO: WARNING: $ac_header: present but cannot be compiled" >&5 +$as_echo "$as_me: WARNING: $ac_header: present but cannot be compiled" >&2;} + { $as_echo "$as_me:$LINENO: WARNING: $ac_header: check for missing prerequisite headers?" >&5 +$as_echo "$as_me: WARNING: $ac_header: check for missing prerequisite headers?" >&2;} + { $as_echo "$as_me:$LINENO: WARNING: $ac_header: see the Autoconf documentation" >&5 +$as_echo "$as_me: WARNING: $ac_header: see the Autoconf documentation" >&2;} + { $as_echo "$as_me:$LINENO: WARNING: $ac_header: section \"Present But Cannot Be Compiled\"" >&5 +$as_echo "$as_me: WARNING: $ac_header: section \"Present But Cannot Be Compiled\"" >&2;} + { $as_echo "$as_me:$LINENO: WARNING: $ac_header: proceeding with the preprocessor's result" >&5 +$as_echo "$as_me: WARNING: $ac_header: proceeding with the preprocessor's result" >&2;} + { $as_echo "$as_me:$LINENO: WARNING: $ac_header: in the future, the compiler will take precedence" >&5 +$as_echo "$as_me: WARNING: $ac_header: in the future, the compiler will take precedence" >&2;} + + ;; +esac +{ $as_echo "$as_me:$LINENO: checking for $ac_header" >&5 +$as_echo_n "checking for $ac_header... " >&6; } +if { as_var=$as_ac_Header; eval "test \"\${$as_var+set}\" = set"; }; then + $as_echo_n "(cached) " >&6 +else + eval "$as_ac_Header=\$ac_header_preproc" +fi +ac_res=`eval 'as_val=${'$as_ac_Header'} + $as_echo "$as_val"'` + { $as_echo "$as_me:$LINENO: result: $ac_res" >&5 +$as_echo "$ac_res" >&6; } + +fi +as_val=`eval 'as_val=${'$as_ac_Header'} + $as_echo "$as_val"'` + if test "x$as_val" = x""yes; then cat >>confdefs.h <<_ACEOF #define `$as_echo "HAVE_$ac_header" | $as_tr_cpp` 1 _ACEOF @@ -4281,15 +5192,19 @@ fi done -{ $as_echo "$as_me:${as_lineno-$LINENO}: checking types of arguments for select" >&5 +{ $as_echo "$as_me:$LINENO: checking types of arguments for select" >&5 $as_echo_n "checking types of arguments for select... " >&6; } -if test "${ac_cv_func_select_args+set}" = set; then : +if test "${ac_cv_func_select_args+set}" = set; then $as_echo_n "(cached) " >&6 else for ac_arg234 in 'fd_set *' 'int *' 'void *'; do for ac_arg1 in 'int' 'size_t' 'unsigned long int' 'unsigned int'; do for ac_arg5 in 'struct timeval *' 'const struct timeval *'; do - cat confdefs.h - <<_ACEOF >conftest.$ac_ext + cat >conftest.$ac_ext <<_ACEOF +/* confdefs.h. */ +_ACEOF +cat confdefs.h >>conftest.$ac_ext +cat >>conftest.$ac_ext <<_ACEOF /* end confdefs.h. 
*/ $ac_includes_default #ifdef HAVE_SYS_SELECT_H @@ -4309,9 +5224,32 @@ extern int select ($ac_arg1, return 0; } _ACEOF -if ac_fn_c_try_compile "$LINENO"; then : +rm -f conftest.$ac_objext +if { (ac_try="$ac_compile" +case "(($ac_try" in + *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; + *) ac_try_echo=$ac_try;; +esac +eval ac_try_echo="\"\$as_me:$LINENO: $ac_try_echo\"" +$as_echo "$ac_try_echo") >&5 + (eval "$ac_compile") 2>conftest.er1 + ac_status=$? + grep -v '^ *+' conftest.er1 >conftest.err + rm -f conftest.er1 + cat conftest.err >&5 + $as_echo "$as_me:$LINENO: \$? = $ac_status" >&5 + (exit $ac_status); } && { + test -z "$ac_c_werror_flag" || + test ! -s conftest.err + } && test -s conftest.$ac_objext; then ac_cv_func_select_args="$ac_arg1,$ac_arg234,$ac_arg5"; break 3 +else + $as_echo "$as_me: failed program was:" >&5 +sed 's/^/| /' conftest.$ac_ext >&5 + + fi + rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext done done @@ -4320,7 +5258,7 @@ done : ${ac_cv_func_select_args='int,int *,struct timeval *'} fi -{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_func_select_args" >&5 +{ $as_echo "$as_me:$LINENO: result: $ac_cv_func_select_args" >&5 $as_echo "$ac_cv_func_select_args" >&6; } ac_save_IFS=$IFS; IFS=',' set dummy `echo "$ac_cv_func_select_args" | sed 's/\*/\*/g'` @@ -4343,12 +5281,16 @@ _ACEOF rm -f conftest* -{ $as_echo "$as_me:${as_lineno-$LINENO}: checking return type of signal handlers" >&5 +{ $as_echo "$as_me:$LINENO: checking return type of signal handlers" >&5 $as_echo_n "checking return type of signal handlers... " >&6; } -if test "${ac_cv_type_signal+set}" = set; then : +if test "${ac_cv_type_signal+set}" = set; then $as_echo_n "(cached) " >&6 else - cat confdefs.h - <<_ACEOF >conftest.$ac_ext + cat >conftest.$ac_ext <<_ACEOF +/* confdefs.h. */ +_ACEOF +cat confdefs.h >>conftest.$ac_ext +cat >>conftest.$ac_ext <<_ACEOF /* end confdefs.h. */ #include #include @@ -4361,14 +5303,35 @@ return *(signal (0, 0)) (0) == 1; return 0; } _ACEOF -if ac_fn_c_try_compile "$LINENO"; then : +rm -f conftest.$ac_objext +if { (ac_try="$ac_compile" +case "(($ac_try" in + *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; + *) ac_try_echo=$ac_try;; +esac +eval ac_try_echo="\"\$as_me:$LINENO: $ac_try_echo\"" +$as_echo "$ac_try_echo") >&5 + (eval "$ac_compile") 2>conftest.er1 + ac_status=$? + grep -v '^ *+' conftest.er1 >conftest.err + rm -f conftest.er1 + cat conftest.err >&5 + $as_echo "$as_me:$LINENO: \$? = $ac_status" >&5 + (exit $ac_status); } && { + test -z "$ac_c_werror_flag" || + test ! -s conftest.err + } && test -s conftest.$ac_objext; then ac_cv_type_signal=int else - ac_cv_type_signal=void + $as_echo "$as_me: failed program was:" >&5 +sed 's/^/| /' conftest.$ac_ext >&5 + + ac_cv_type_signal=void fi + rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext fi -{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_type_signal" >&5 +{ $as_echo "$as_me:$LINENO: result: $ac_cv_type_signal" >&5 $as_echo "$ac_cv_type_signal" >&6; } cat >>confdefs.h <<_ACEOF @@ -4376,24 +5339,118 @@ cat >>confdefs.h <<_ACEOF _ACEOF + for ac_func in strftime -do : - ac_fn_c_check_func "$LINENO" "strftime" "ac_cv_func_strftime" -if test "x$ac_cv_func_strftime" = x""yes; then : +do +as_ac_var=`$as_echo "ac_cv_func_$ac_func" | $as_tr_sh` +{ $as_echo "$as_me:$LINENO: checking for $ac_func" >&5 +$as_echo_n "checking for $ac_func... " >&6; } +if { as_var=$as_ac_var; eval "test \"\${$as_var+set}\" = set"; }; then + $as_echo_n "(cached) " >&6 +else + cat >conftest.$ac_ext <<_ACEOF +/* confdefs.h. 
*/ +_ACEOF +cat confdefs.h >>conftest.$ac_ext +cat >>conftest.$ac_ext <<_ACEOF +/* end confdefs.h. */ +/* Define $ac_func to an innocuous variant, in case declares $ac_func. + For example, HP-UX 11i declares gettimeofday. */ +#define $ac_func innocuous_$ac_func + +/* System header to define __stub macros and hopefully few prototypes, + which can conflict with char $ac_func (); below. + Prefer to if __STDC__ is defined, since + exists even on freestanding compilers. */ + +#ifdef __STDC__ +# include +#else +# include +#endif + +#undef $ac_func + +/* Override any GCC internal prototype to avoid an error. + Use char because int might match the return type of a GCC + builtin and then its argument prototype would still apply. */ +#ifdef __cplusplus +extern "C" +#endif +char $ac_func (); +/* The GNU C library defines this for functions which it implements + to always fail with ENOSYS. Some functions are actually named + something starting with __ and the normal name is an alias. */ +#if defined __stub_$ac_func || defined __stub___$ac_func +choke me +#endif + +int +main () +{ +return $ac_func (); + ; + return 0; +} +_ACEOF +rm -f conftest.$ac_objext conftest$ac_exeext +if { (ac_try="$ac_link" +case "(($ac_try" in + *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; + *) ac_try_echo=$ac_try;; +esac +eval ac_try_echo="\"\$as_me:$LINENO: $ac_try_echo\"" +$as_echo "$ac_try_echo") >&5 + (eval "$ac_link") 2>conftest.er1 + ac_status=$? + grep -v '^ *+' conftest.er1 >conftest.err + rm -f conftest.er1 + cat conftest.err >&5 + $as_echo "$as_me:$LINENO: \$? = $ac_status" >&5 + (exit $ac_status); } && { + test -z "$ac_c_werror_flag" || + test ! -s conftest.err + } && test -s conftest$ac_exeext && { + test "$cross_compiling" = yes || + $as_test_x conftest$ac_exeext + }; then + eval "$as_ac_var=yes" +else + $as_echo "$as_me: failed program was:" >&5 +sed 's/^/| /' conftest.$ac_ext >&5 + + eval "$as_ac_var=no" +fi + +rm -rf conftest.dSYM +rm -f core conftest.err conftest.$ac_objext conftest_ipa8_conftest.oo \ + conftest$ac_exeext conftest.$ac_ext +fi +ac_res=`eval 'as_val=${'$as_ac_var'} + $as_echo "$as_val"'` + { $as_echo "$as_me:$LINENO: result: $ac_res" >&5 +$as_echo "$ac_res" >&6; } +as_val=`eval 'as_val=${'$as_ac_var'} + $as_echo "$as_val"'` + if test "x$as_val" = x""yes; then cat >>confdefs.h <<_ACEOF -#define HAVE_STRFTIME 1 +#define `$as_echo "HAVE_$ac_func" | $as_tr_cpp` 1 _ACEOF else # strftime is in -lintl on SCO UNIX. -{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for strftime in -lintl" >&5 +{ $as_echo "$as_me:$LINENO: checking for strftime in -lintl" >&5 $as_echo_n "checking for strftime in -lintl... " >&6; } -if test "${ac_cv_lib_intl_strftime+set}" = set; then : +if test "${ac_cv_lib_intl_strftime+set}" = set; then $as_echo_n "(cached) " >&6 else ac_check_lib_save_LIBS=$LIBS LIBS="-lintl $LIBS" -cat confdefs.h - <<_ACEOF >conftest.$ac_ext +cat >conftest.$ac_ext <<_ACEOF +/* confdefs.h. */ +_ACEOF +cat confdefs.h >>conftest.$ac_ext +cat >>conftest.$ac_ext <<_ACEOF /* end confdefs.h. */ /* Override any GCC internal prototype to avoid an error. @@ -4411,19 +5468,46 @@ return strftime (); return 0; } _ACEOF -if ac_fn_c_try_link "$LINENO"; then : +rm -f conftest.$ac_objext conftest$ac_exeext +if { (ac_try="$ac_link" +case "(($ac_try" in + *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; + *) ac_try_echo=$ac_try;; +esac +eval ac_try_echo="\"\$as_me:$LINENO: $ac_try_echo\"" +$as_echo "$ac_try_echo") >&5 + (eval "$ac_link") 2>conftest.er1 + ac_status=$? 
+ grep -v '^ *+' conftest.er1 >conftest.err + rm -f conftest.er1 + cat conftest.err >&5 + $as_echo "$as_me:$LINENO: \$? = $ac_status" >&5 + (exit $ac_status); } && { + test -z "$ac_c_werror_flag" || + test ! -s conftest.err + } && test -s conftest$ac_exeext && { + test "$cross_compiling" = yes || + $as_test_x conftest$ac_exeext + }; then ac_cv_lib_intl_strftime=yes else - ac_cv_lib_intl_strftime=no + $as_echo "$as_me: failed program was:" >&5 +sed 's/^/| /' conftest.$ac_ext >&5 + + ac_cv_lib_intl_strftime=no fi -rm -f core conftest.err conftest.$ac_objext \ - conftest$ac_exeext conftest.$ac_ext + +rm -rf conftest.dSYM +rm -f core conftest.err conftest.$ac_objext conftest_ipa8_conftest.oo \ + conftest$ac_exeext conftest.$ac_ext LIBS=$ac_check_lib_save_LIBS fi -{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_lib_intl_strftime" >&5 +{ $as_echo "$as_me:$LINENO: result: $ac_cv_lib_intl_strftime" >&5 $as_echo "$ac_cv_lib_intl_strftime" >&6; } -if test "x$ac_cv_lib_intl_strftime" = x""yes; then : - $as_echo "#define HAVE_STRFTIME 1" >>confdefs.h +if test "x$ac_cv_lib_intl_strftime" = x""yes; then + cat >>confdefs.h <<\_ACEOF +#define HAVE_STRFTIME 1 +_ACEOF LIBS="-lintl $LIBS" fi @@ -4431,55 +5515,422 @@ fi fi done + for ac_func in vprintf -do : - ac_fn_c_check_func "$LINENO" "vprintf" "ac_cv_func_vprintf" -if test "x$ac_cv_func_vprintf" = x""yes; then : +do +as_ac_var=`$as_echo "ac_cv_func_$ac_func" | $as_tr_sh` +{ $as_echo "$as_me:$LINENO: checking for $ac_func" >&5 +$as_echo_n "checking for $ac_func... " >&6; } +if { as_var=$as_ac_var; eval "test \"\${$as_var+set}\" = set"; }; then + $as_echo_n "(cached) " >&6 +else + cat >conftest.$ac_ext <<_ACEOF +/* confdefs.h. */ +_ACEOF +cat confdefs.h >>conftest.$ac_ext +cat >>conftest.$ac_ext <<_ACEOF +/* end confdefs.h. */ +/* Define $ac_func to an innocuous variant, in case declares $ac_func. + For example, HP-UX 11i declares gettimeofday. */ +#define $ac_func innocuous_$ac_func + +/* System header to define __stub macros and hopefully few prototypes, + which can conflict with char $ac_func (); below. + Prefer to if __STDC__ is defined, since + exists even on freestanding compilers. */ + +#ifdef __STDC__ +# include +#else +# include +#endif + +#undef $ac_func + +/* Override any GCC internal prototype to avoid an error. + Use char because int might match the return type of a GCC + builtin and then its argument prototype would still apply. */ +#ifdef __cplusplus +extern "C" +#endif +char $ac_func (); +/* The GNU C library defines this for functions which it implements + to always fail with ENOSYS. Some functions are actually named + something starting with __ and the normal name is an alias. */ +#if defined __stub_$ac_func || defined __stub___$ac_func +choke me +#endif + +int +main () +{ +return $ac_func (); + ; + return 0; +} +_ACEOF +rm -f conftest.$ac_objext conftest$ac_exeext +if { (ac_try="$ac_link" +case "(($ac_try" in + *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; + *) ac_try_echo=$ac_try;; +esac +eval ac_try_echo="\"\$as_me:$LINENO: $ac_try_echo\"" +$as_echo "$ac_try_echo") >&5 + (eval "$ac_link") 2>conftest.er1 + ac_status=$? + grep -v '^ *+' conftest.er1 >conftest.err + rm -f conftest.er1 + cat conftest.err >&5 + $as_echo "$as_me:$LINENO: \$? = $ac_status" >&5 + (exit $ac_status); } && { + test -z "$ac_c_werror_flag" || + test ! 
-s conftest.err + } && test -s conftest$ac_exeext && { + test "$cross_compiling" = yes || + $as_test_x conftest$ac_exeext + }; then + eval "$as_ac_var=yes" +else + $as_echo "$as_me: failed program was:" >&5 +sed 's/^/| /' conftest.$ac_ext >&5 + + eval "$as_ac_var=no" +fi + +rm -rf conftest.dSYM +rm -f core conftest.err conftest.$ac_objext conftest_ipa8_conftest.oo \ + conftest$ac_exeext conftest.$ac_ext +fi +ac_res=`eval 'as_val=${'$as_ac_var'} + $as_echo "$as_val"'` + { $as_echo "$as_me:$LINENO: result: $ac_res" >&5 +$as_echo "$ac_res" >&6; } +as_val=`eval 'as_val=${'$as_ac_var'} + $as_echo "$as_val"'` + if test "x$as_val" = x""yes; then cat >>confdefs.h <<_ACEOF -#define HAVE_VPRINTF 1 +#define `$as_echo "HAVE_$ac_func" | $as_tr_cpp` 1 +_ACEOF + +{ $as_echo "$as_me:$LINENO: checking for _doprnt" >&5 +$as_echo_n "checking for _doprnt... " >&6; } +if test "${ac_cv_func__doprnt+set}" = set; then + $as_echo_n "(cached) " >&6 +else + cat >conftest.$ac_ext <<_ACEOF +/* confdefs.h. */ _ACEOF +cat confdefs.h >>conftest.$ac_ext +cat >>conftest.$ac_ext <<_ACEOF +/* end confdefs.h. */ +/* Define _doprnt to an innocuous variant, in case declares _doprnt. + For example, HP-UX 11i declares gettimeofday. */ +#define _doprnt innocuous__doprnt -ac_fn_c_check_func "$LINENO" "_doprnt" "ac_cv_func__doprnt" -if test "x$ac_cv_func__doprnt" = x""yes; then : +/* System header to define __stub macros and hopefully few prototypes, + which can conflict with char _doprnt (); below. + Prefer to if __STDC__ is defined, since + exists even on freestanding compilers. */ -$as_echo "#define HAVE_DOPRNT 1" >>confdefs.h +#ifdef __STDC__ +# include +#else +# include +#endif + +#undef _doprnt + +/* Override any GCC internal prototype to avoid an error. + Use char because int might match the return type of a GCC + builtin and then its argument prototype would still apply. */ +#ifdef __cplusplus +extern "C" +#endif +char _doprnt (); +/* The GNU C library defines this for functions which it implements + to always fail with ENOSYS. Some functions are actually named + something starting with __ and the normal name is an alias. */ +#if defined __stub__doprnt || defined __stub____doprnt +choke me +#endif + +int +main () +{ +return _doprnt (); + ; + return 0; +} +_ACEOF +rm -f conftest.$ac_objext conftest$ac_exeext +if { (ac_try="$ac_link" +case "(($ac_try" in + *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; + *) ac_try_echo=$ac_try;; +esac +eval ac_try_echo="\"\$as_me:$LINENO: $ac_try_echo\"" +$as_echo "$ac_try_echo") >&5 + (eval "$ac_link") 2>conftest.er1 + ac_status=$? + grep -v '^ *+' conftest.er1 >conftest.err + rm -f conftest.er1 + cat conftest.err >&5 + $as_echo "$as_me:$LINENO: \$? = $ac_status" >&5 + (exit $ac_status); } && { + test -z "$ac_c_werror_flag" || + test ! 
-s conftest.err + } && test -s conftest$ac_exeext && { + test "$cross_compiling" = yes || + $as_test_x conftest$ac_exeext + }; then + ac_cv_func__doprnt=yes +else + $as_echo "$as_me: failed program was:" >&5 +sed 's/^/| /' conftest.$ac_ext >&5 + + ac_cv_func__doprnt=no +fi + +rm -rf conftest.dSYM +rm -f core conftest.err conftest.$ac_objext conftest_ipa8_conftest.oo \ + conftest$ac_exeext conftest.$ac_ext +fi +{ $as_echo "$as_me:$LINENO: result: $ac_cv_func__doprnt" >&5 +$as_echo "$ac_cv_func__doprnt" >&6; } +if test "x$ac_cv_func__doprnt" = x""yes; then + +cat >>confdefs.h <<\_ACEOF +#define HAVE_DOPRNT 1 +_ACEOF + +fi + +fi +done + + + + + + + + + + + +for ac_func in gettimeofday basename inet_ntoa memmove memset mkdir select strdup strerror +do +as_ac_var=`$as_echo "ac_cv_func_$ac_func" | $as_tr_sh` +{ $as_echo "$as_me:$LINENO: checking for $ac_func" >&5 +$as_echo_n "checking for $ac_func... " >&6; } +if { as_var=$as_ac_var; eval "test \"\${$as_var+set}\" = set"; }; then + $as_echo_n "(cached) " >&6 +else + cat >conftest.$ac_ext <<_ACEOF +/* confdefs.h. */ +_ACEOF +cat confdefs.h >>conftest.$ac_ext +cat >>conftest.$ac_ext <<_ACEOF +/* end confdefs.h. */ +/* Define $ac_func to an innocuous variant, in case declares $ac_func. + For example, HP-UX 11i declares gettimeofday. */ +#define $ac_func innocuous_$ac_func + +/* System header to define __stub macros and hopefully few prototypes, + which can conflict with char $ac_func (); below. + Prefer to if __STDC__ is defined, since + exists even on freestanding compilers. */ + +#ifdef __STDC__ +# include +#else +# include +#endif + +#undef $ac_func + +/* Override any GCC internal prototype to avoid an error. + Use char because int might match the return type of a GCC + builtin and then its argument prototype would still apply. */ +#ifdef __cplusplus +extern "C" +#endif +char $ac_func (); +/* The GNU C library defines this for functions which it implements + to always fail with ENOSYS. Some functions are actually named + something starting with __ and the normal name is an alias. */ +#if defined __stub_$ac_func || defined __stub___$ac_func +choke me +#endif + +int +main () +{ +return $ac_func (); + ; + return 0; +} +_ACEOF +rm -f conftest.$ac_objext conftest$ac_exeext +if { (ac_try="$ac_link" +case "(($ac_try" in + *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; + *) ac_try_echo=$ac_try;; +esac +eval ac_try_echo="\"\$as_me:$LINENO: $ac_try_echo\"" +$as_echo "$ac_try_echo") >&5 + (eval "$ac_link") 2>conftest.er1 + ac_status=$? + grep -v '^ *+' conftest.er1 >conftest.err + rm -f conftest.er1 + cat conftest.err >&5 + $as_echo "$as_me:$LINENO: \$? = $ac_status" >&5 + (exit $ac_status); } && { + test -z "$ac_c_werror_flag" || + test ! 
-s conftest.err + } && test -s conftest$ac_exeext && { + test "$cross_compiling" = yes || + $as_test_x conftest$ac_exeext + }; then + eval "$as_ac_var=yes" +else + $as_echo "$as_me: failed program was:" >&5 +sed 's/^/| /' conftest.$ac_ext >&5 + + eval "$as_ac_var=no" +fi + +rm -rf conftest.dSYM +rm -f core conftest.err conftest.$ac_objext conftest_ipa8_conftest.oo \ + conftest$ac_exeext conftest.$ac_ext +fi +ac_res=`eval 'as_val=${'$as_ac_var'} + $as_echo "$as_val"'` + { $as_echo "$as_me:$LINENO: result: $ac_res" >&5 +$as_echo "$ac_res" >&6; } +as_val=`eval 'as_val=${'$as_ac_var'} + $as_echo "$as_val"'` + if test "x$as_val" = x""yes; then + cat >>confdefs.h <<_ACEOF +#define `$as_echo "HAVE_$ac_func" | $as_tr_cpp` 1 +_ACEOF + +fi +done + + +for ac_func in gethostbyname +do +as_ac_var=`$as_echo "ac_cv_func_$ac_func" | $as_tr_sh` +{ $as_echo "$as_me:$LINENO: checking for $ac_func" >&5 +$as_echo_n "checking for $ac_func... " >&6; } +if { as_var=$as_ac_var; eval "test \"\${$as_var+set}\" = set"; }; then + $as_echo_n "(cached) " >&6 +else + cat >conftest.$ac_ext <<_ACEOF +/* confdefs.h. */ +_ACEOF +cat confdefs.h >>conftest.$ac_ext +cat >>conftest.$ac_ext <<_ACEOF +/* end confdefs.h. */ +/* Define $ac_func to an innocuous variant, in case declares $ac_func. + For example, HP-UX 11i declares gettimeofday. */ +#define $ac_func innocuous_$ac_func + +/* System header to define __stub macros and hopefully few prototypes, + which can conflict with char $ac_func (); below. + Prefer to if __STDC__ is defined, since + exists even on freestanding compilers. */ + +#ifdef __STDC__ +# include +#else +# include +#endif + +#undef $ac_func + +/* Override any GCC internal prototype to avoid an error. + Use char because int might match the return type of a GCC + builtin and then its argument prototype would still apply. */ +#ifdef __cplusplus +extern "C" +#endif +char $ac_func (); +/* The GNU C library defines this for functions which it implements + to always fail with ENOSYS. Some functions are actually named + something starting with __ and the normal name is an alias. */ +#if defined __stub_$ac_func || defined __stub___$ac_func +choke me +#endif + +int +main () +{ +return $ac_func (); + ; + return 0; +} +_ACEOF +rm -f conftest.$ac_objext conftest$ac_exeext +if { (ac_try="$ac_link" +case "(($ac_try" in + *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; + *) ac_try_echo=$ac_try;; +esac +eval ac_try_echo="\"\$as_me:$LINENO: $ac_try_echo\"" +$as_echo "$ac_try_echo") >&5 + (eval "$ac_link") 2>conftest.er1 + ac_status=$? + grep -v '^ *+' conftest.er1 >conftest.err + rm -f conftest.er1 + cat conftest.err >&5 + $as_echo "$as_me:$LINENO: \$? = $ac_status" >&5 + (exit $ac_status); } && { + test -z "$ac_c_werror_flag" || + test ! 
-s conftest.err + } && test -s conftest$ac_exeext && { + test "$cross_compiling" = yes || + $as_test_x conftest$ac_exeext + }; then + eval "$as_ac_var=yes" +else + $as_echo "$as_me: failed program was:" >&5 +sed 's/^/| /' conftest.$ac_ext >&5 + eval "$as_ac_var=no" fi +rm -rf conftest.dSYM +rm -f core conftest.err conftest.$ac_objext conftest_ipa8_conftest.oo \ + conftest$ac_exeext conftest.$ac_ext fi -done - - -for ac_func in gettimeofday basename inet_ntoa memmove memset mkdir select strdup strerror -do : - as_ac_var=`$as_echo "ac_cv_func_$ac_func" | $as_tr_sh` -ac_fn_c_check_func "$LINENO" "$ac_func" "$as_ac_var" -eval as_val=\$$as_ac_var - if test "x$as_val" = x""yes; then : +ac_res=`eval 'as_val=${'$as_ac_var'} + $as_echo "$as_val"'` + { $as_echo "$as_me:$LINENO: result: $ac_res" >&5 +$as_echo "$ac_res" >&6; } +as_val=`eval 'as_val=${'$as_ac_var'} + $as_echo "$as_val"'` + if test "x$as_val" = x""yes; then cat >>confdefs.h <<_ACEOF #define `$as_echo "HAVE_$ac_func" | $as_tr_cpp` 1 _ACEOF -fi -done - -for ac_func in gethostbyname -do : - ac_fn_c_check_func "$LINENO" "gethostbyname" "ac_cv_func_gethostbyname" -if test "x$ac_cv_func_gethostbyname" = x""yes; then : - cat >>confdefs.h <<_ACEOF -#define HAVE_GETHOSTBYNAME 1 -_ACEOF - else - { $as_echo "$as_me:${as_lineno-$LINENO}: checking for gethostbyname in -lnsl" >&5 + +{ $as_echo "$as_me:$LINENO: checking for gethostbyname in -lnsl" >&5 $as_echo_n "checking for gethostbyname in -lnsl... " >&6; } -if test "${ac_cv_lib_nsl_gethostbyname+set}" = set; then : +if test "${ac_cv_lib_nsl_gethostbyname+set}" = set; then $as_echo_n "(cached) " >&6 else ac_check_lib_save_LIBS=$LIBS LIBS="-lnsl $LIBS" -cat confdefs.h - <<_ACEOF >conftest.$ac_ext +cat >conftest.$ac_ext <<_ACEOF +/* confdefs.h. */ +_ACEOF +cat confdefs.h >>conftest.$ac_ext +cat >>conftest.$ac_ext <<_ACEOF /* end confdefs.h. */ /* Override any GCC internal prototype to avoid an error. @@ -4497,18 +5948,43 @@ return gethostbyname (); return 0; } _ACEOF -if ac_fn_c_try_link "$LINENO"; then : +rm -f conftest.$ac_objext conftest$ac_exeext +if { (ac_try="$ac_link" +case "(($ac_try" in + *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; + *) ac_try_echo=$ac_try;; +esac +eval ac_try_echo="\"\$as_me:$LINENO: $ac_try_echo\"" +$as_echo "$ac_try_echo") >&5 + (eval "$ac_link") 2>conftest.er1 + ac_status=$? + grep -v '^ *+' conftest.er1 >conftest.err + rm -f conftest.er1 + cat conftest.err >&5 + $as_echo "$as_me:$LINENO: \$? = $ac_status" >&5 + (exit $ac_status); } && { + test -z "$ac_c_werror_flag" || + test ! 
-s conftest.err + } && test -s conftest$ac_exeext && { + test "$cross_compiling" = yes || + $as_test_x conftest$ac_exeext + }; then ac_cv_lib_nsl_gethostbyname=yes else - ac_cv_lib_nsl_gethostbyname=no + $as_echo "$as_me: failed program was:" >&5 +sed 's/^/| /' conftest.$ac_ext >&5 + + ac_cv_lib_nsl_gethostbyname=no fi -rm -f core conftest.err conftest.$ac_objext \ - conftest$ac_exeext conftest.$ac_ext + +rm -rf conftest.dSYM +rm -f core conftest.err conftest.$ac_objext conftest_ipa8_conftest.oo \ + conftest$ac_exeext conftest.$ac_ext LIBS=$ac_check_lib_save_LIBS fi -{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_lib_nsl_gethostbyname" >&5 +{ $as_echo "$as_me:$LINENO: result: $ac_cv_lib_nsl_gethostbyname" >&5 $as_echo "$ac_cv_lib_nsl_gethostbyname" >&6; } -if test "x$ac_cv_lib_nsl_gethostbyname" = x""yes; then : +if test "x$ac_cv_lib_nsl_gethostbyname" = x""yes; then cat >>confdefs.h <<_ACEOF #define HAVE_LIBNSL 1 _ACEOF @@ -4516,14 +5992,19 @@ _ACEOF LIBS="-lnsl $LIBS" else - { $as_echo "$as_me:${as_lineno-$LINENO}: checking for gethostbyname in -lsocket" >&5 + +{ $as_echo "$as_me:$LINENO: checking for gethostbyname in -lsocket" >&5 $as_echo_n "checking for gethostbyname in -lsocket... " >&6; } -if test "${ac_cv_lib_socket_gethostbyname+set}" = set; then : +if test "${ac_cv_lib_socket_gethostbyname+set}" = set; then $as_echo_n "(cached) " >&6 else ac_check_lib_save_LIBS=$LIBS LIBS="-lsocket $LIBS" -cat confdefs.h - <<_ACEOF >conftest.$ac_ext +cat >conftest.$ac_ext <<_ACEOF +/* confdefs.h. */ +_ACEOF +cat confdefs.h >>conftest.$ac_ext +cat >>conftest.$ac_ext <<_ACEOF /* end confdefs.h. */ /* Override any GCC internal prototype to avoid an error. @@ -4541,18 +6022,43 @@ return gethostbyname (); return 0; } _ACEOF -if ac_fn_c_try_link "$LINENO"; then : +rm -f conftest.$ac_objext conftest$ac_exeext +if { (ac_try="$ac_link" +case "(($ac_try" in + *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; + *) ac_try_echo=$ac_try;; +esac +eval ac_try_echo="\"\$as_me:$LINENO: $ac_try_echo\"" +$as_echo "$ac_try_echo") >&5 + (eval "$ac_link") 2>conftest.er1 + ac_status=$? + grep -v '^ *+' conftest.er1 >conftest.err + rm -f conftest.er1 + cat conftest.err >&5 + $as_echo "$as_me:$LINENO: \$? = $ac_status" >&5 + (exit $ac_status); } && { + test -z "$ac_c_werror_flag" || + test ! 
-s conftest.err + } && test -s conftest$ac_exeext && { + test "$cross_compiling" = yes || + $as_test_x conftest$ac_exeext + }; then ac_cv_lib_socket_gethostbyname=yes else - ac_cv_lib_socket_gethostbyname=no + $as_echo "$as_me: failed program was:" >&5 +sed 's/^/| /' conftest.$ac_ext >&5 + + ac_cv_lib_socket_gethostbyname=no fi -rm -f core conftest.err conftest.$ac_objext \ - conftest$ac_exeext conftest.$ac_ext + +rm -rf conftest.dSYM +rm -f core conftest.err conftest.$ac_objext conftest_ipa8_conftest.oo \ + conftest$ac_exeext conftest.$ac_ext LIBS=$ac_check_lib_save_LIBS fi -{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_lib_socket_gethostbyname" >&5 +{ $as_echo "$as_me:$LINENO: result: $ac_cv_lib_socket_gethostbyname" >&5 $as_echo "$ac_cv_lib_socket_gethostbyname" >&6; } -if test "x$ac_cv_lib_socket_gethostbyname" = x""yes; then : +if test "x$ac_cv_lib_socket_gethostbyname" = x""yes; then cat >>confdefs.h <<_ACEOF #define HAVE_LIBSOCKET 1 _ACEOF @@ -4566,23 +6072,118 @@ fi fi done + for ac_func in setsockopt -do : - ac_fn_c_check_func "$LINENO" "setsockopt" "ac_cv_func_setsockopt" -if test "x$ac_cv_func_setsockopt" = x""yes; then : +do +as_ac_var=`$as_echo "ac_cv_func_$ac_func" | $as_tr_sh` +{ $as_echo "$as_me:$LINENO: checking for $ac_func" >&5 +$as_echo_n "checking for $ac_func... " >&6; } +if { as_var=$as_ac_var; eval "test \"\${$as_var+set}\" = set"; }; then + $as_echo_n "(cached) " >&6 +else + cat >conftest.$ac_ext <<_ACEOF +/* confdefs.h. */ +_ACEOF +cat confdefs.h >>conftest.$ac_ext +cat >>conftest.$ac_ext <<_ACEOF +/* end confdefs.h. */ +/* Define $ac_func to an innocuous variant, in case declares $ac_func. + For example, HP-UX 11i declares gettimeofday. */ +#define $ac_func innocuous_$ac_func + +/* System header to define __stub macros and hopefully few prototypes, + which can conflict with char $ac_func (); below. + Prefer to if __STDC__ is defined, since + exists even on freestanding compilers. */ + +#ifdef __STDC__ +# include +#else +# include +#endif + +#undef $ac_func + +/* Override any GCC internal prototype to avoid an error. + Use char because int might match the return type of a GCC + builtin and then its argument prototype would still apply. */ +#ifdef __cplusplus +extern "C" +#endif +char $ac_func (); +/* The GNU C library defines this for functions which it implements + to always fail with ENOSYS. Some functions are actually named + something starting with __ and the normal name is an alias. */ +#if defined __stub_$ac_func || defined __stub___$ac_func +choke me +#endif + +int +main () +{ +return $ac_func (); + ; + return 0; +} +_ACEOF +rm -f conftest.$ac_objext conftest$ac_exeext +if { (ac_try="$ac_link" +case "(($ac_try" in + *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; + *) ac_try_echo=$ac_try;; +esac +eval ac_try_echo="\"\$as_me:$LINENO: $ac_try_echo\"" +$as_echo "$ac_try_echo") >&5 + (eval "$ac_link") 2>conftest.er1 + ac_status=$? + grep -v '^ *+' conftest.er1 >conftest.err + rm -f conftest.er1 + cat conftest.err >&5 + $as_echo "$as_me:$LINENO: \$? = $ac_status" >&5 + (exit $ac_status); } && { + test -z "$ac_c_werror_flag" || + test ! 
-s conftest.err + } && test -s conftest$ac_exeext && { + test "$cross_compiling" = yes || + $as_test_x conftest$ac_exeext + }; then + eval "$as_ac_var=yes" +else + $as_echo "$as_me: failed program was:" >&5 +sed 's/^/| /' conftest.$ac_ext >&5 + + eval "$as_ac_var=no" +fi + +rm -rf conftest.dSYM +rm -f core conftest.err conftest.$ac_objext conftest_ipa8_conftest.oo \ + conftest$ac_exeext conftest.$ac_ext +fi +ac_res=`eval 'as_val=${'$as_ac_var'} + $as_echo "$as_val"'` + { $as_echo "$as_me:$LINENO: result: $ac_res" >&5 +$as_echo "$ac_res" >&6; } +as_val=`eval 'as_val=${'$as_ac_var'} + $as_echo "$as_val"'` + if test "x$as_val" = x""yes; then cat >>confdefs.h <<_ACEOF -#define HAVE_SETSOCKOPT 1 +#define `$as_echo "HAVE_$ac_func" | $as_tr_cpp` 1 _ACEOF else - { $as_echo "$as_me:${as_lineno-$LINENO}: checking for setsockopt in -lsocket" >&5 + +{ $as_echo "$as_me:$LINENO: checking for setsockopt in -lsocket" >&5 $as_echo_n "checking for setsockopt in -lsocket... " >&6; } -if test "${ac_cv_lib_socket_setsockopt+set}" = set; then : +if test "${ac_cv_lib_socket_setsockopt+set}" = set; then $as_echo_n "(cached) " >&6 else ac_check_lib_save_LIBS=$LIBS LIBS="-lsocket $LIBS" -cat confdefs.h - <<_ACEOF >conftest.$ac_ext +cat >conftest.$ac_ext <<_ACEOF +/* confdefs.h. */ +_ACEOF +cat confdefs.h >>conftest.$ac_ext +cat >>conftest.$ac_ext <<_ACEOF /* end confdefs.h. */ /* Override any GCC internal prototype to avoid an error. @@ -4600,18 +6201,43 @@ return setsockopt (); return 0; } _ACEOF -if ac_fn_c_try_link "$LINENO"; then : +rm -f conftest.$ac_objext conftest$ac_exeext +if { (ac_try="$ac_link" +case "(($ac_try" in + *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; + *) ac_try_echo=$ac_try;; +esac +eval ac_try_echo="\"\$as_me:$LINENO: $ac_try_echo\"" +$as_echo "$ac_try_echo") >&5 + (eval "$ac_link") 2>conftest.er1 + ac_status=$? + grep -v '^ *+' conftest.er1 >conftest.err + rm -f conftest.er1 + cat conftest.err >&5 + $as_echo "$as_me:$LINENO: \$? = $ac_status" >&5 + (exit $ac_status); } && { + test -z "$ac_c_werror_flag" || + test ! -s conftest.err + } && test -s conftest$ac_exeext && { + test "$cross_compiling" = yes || + $as_test_x conftest$ac_exeext + }; then ac_cv_lib_socket_setsockopt=yes else - ac_cv_lib_socket_setsockopt=no + $as_echo "$as_me: failed program was:" >&5 +sed 's/^/| /' conftest.$ac_ext >&5 + + ac_cv_lib_socket_setsockopt=no fi -rm -f core conftest.err conftest.$ac_objext \ - conftest$ac_exeext conftest.$ac_ext + +rm -rf conftest.dSYM +rm -f core conftest.err conftest.$ac_objext conftest_ipa8_conftest.oo \ + conftest$ac_exeext conftest.$ac_ext LIBS=$ac_check_lib_save_LIBS fi -{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_lib_socket_setsockopt" >&5 +{ $as_echo "$as_me:$LINENO: result: $ac_cv_lib_socket_setsockopt" >&5 $as_echo "$ac_cv_lib_socket_setsockopt" >&6; } -if test "x$ac_cv_lib_socket_setsockopt" = x""yes; then : +if test "x$ac_cv_lib_socket_setsockopt" = x""yes; then cat >>confdefs.h <<_ACEOF #define HAVE_LIBSOCKET 1 _ACEOF @@ -4623,23 +6249,118 @@ fi fi done + for ac_func in hstrerror -do : - ac_fn_c_check_func "$LINENO" "hstrerror" "ac_cv_func_hstrerror" -if test "x$ac_cv_func_hstrerror" = x""yes; then : +do +as_ac_var=`$as_echo "ac_cv_func_$ac_func" | $as_tr_sh` +{ $as_echo "$as_me:$LINENO: checking for $ac_func" >&5 +$as_echo_n "checking for $ac_func... " >&6; } +if { as_var=$as_ac_var; eval "test \"\${$as_var+set}\" = set"; }; then + $as_echo_n "(cached) " >&6 +else + cat >conftest.$ac_ext <<_ACEOF +/* confdefs.h. 
*/ +_ACEOF +cat confdefs.h >>conftest.$ac_ext +cat >>conftest.$ac_ext <<_ACEOF +/* end confdefs.h. */ +/* Define $ac_func to an innocuous variant, in case declares $ac_func. + For example, HP-UX 11i declares gettimeofday. */ +#define $ac_func innocuous_$ac_func + +/* System header to define __stub macros and hopefully few prototypes, + which can conflict with char $ac_func (); below. + Prefer to if __STDC__ is defined, since + exists even on freestanding compilers. */ + +#ifdef __STDC__ +# include +#else +# include +#endif + +#undef $ac_func + +/* Override any GCC internal prototype to avoid an error. + Use char because int might match the return type of a GCC + builtin and then its argument prototype would still apply. */ +#ifdef __cplusplus +extern "C" +#endif +char $ac_func (); +/* The GNU C library defines this for functions which it implements + to always fail with ENOSYS. Some functions are actually named + something starting with __ and the normal name is an alias. */ +#if defined __stub_$ac_func || defined __stub___$ac_func +choke me +#endif + +int +main () +{ +return $ac_func (); + ; + return 0; +} +_ACEOF +rm -f conftest.$ac_objext conftest$ac_exeext +if { (ac_try="$ac_link" +case "(($ac_try" in + *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; + *) ac_try_echo=$ac_try;; +esac +eval ac_try_echo="\"\$as_me:$LINENO: $ac_try_echo\"" +$as_echo "$ac_try_echo") >&5 + (eval "$ac_link") 2>conftest.er1 + ac_status=$? + grep -v '^ *+' conftest.er1 >conftest.err + rm -f conftest.er1 + cat conftest.err >&5 + $as_echo "$as_me:$LINENO: \$? = $ac_status" >&5 + (exit $ac_status); } && { + test -z "$ac_c_werror_flag" || + test ! -s conftest.err + } && test -s conftest$ac_exeext && { + test "$cross_compiling" = yes || + $as_test_x conftest$ac_exeext + }; then + eval "$as_ac_var=yes" +else + $as_echo "$as_me: failed program was:" >&5 +sed 's/^/| /' conftest.$ac_ext >&5 + + eval "$as_ac_var=no" +fi + +rm -rf conftest.dSYM +rm -f core conftest.err conftest.$ac_objext conftest_ipa8_conftest.oo \ + conftest$ac_exeext conftest.$ac_ext +fi +ac_res=`eval 'as_val=${'$as_ac_var'} + $as_echo "$as_val"'` + { $as_echo "$as_me:$LINENO: result: $ac_res" >&5 +$as_echo "$ac_res" >&6; } +as_val=`eval 'as_val=${'$as_ac_var'} + $as_echo "$as_val"'` + if test "x$as_val" = x""yes; then cat >>confdefs.h <<_ACEOF -#define HAVE_HSTRERROR 1 +#define `$as_echo "HAVE_$ac_func" | $as_tr_cpp` 1 _ACEOF else - { $as_echo "$as_me:${as_lineno-$LINENO}: checking for hstrerror in -lresolv" >&5 + +{ $as_echo "$as_me:$LINENO: checking for hstrerror in -lresolv" >&5 $as_echo_n "checking for hstrerror in -lresolv... " >&6; } -if test "${ac_cv_lib_resolv_hstrerror+set}" = set; then : +if test "${ac_cv_lib_resolv_hstrerror+set}" = set; then $as_echo_n "(cached) " >&6 else ac_check_lib_save_LIBS=$LIBS LIBS="-lresolv $LIBS" -cat confdefs.h - <<_ACEOF >conftest.$ac_ext +cat >conftest.$ac_ext <<_ACEOF +/* confdefs.h. */ +_ACEOF +cat confdefs.h >>conftest.$ac_ext +cat >>conftest.$ac_ext <<_ACEOF /* end confdefs.h. */ /* Override any GCC internal prototype to avoid an error. @@ -4657,34 +6378,161 @@ return hstrerror (); return 0; } _ACEOF -if ac_fn_c_try_link "$LINENO"; then : +rm -f conftest.$ac_objext conftest$ac_exeext +if { (ac_try="$ac_link" +case "(($ac_try" in + *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; + *) ac_try_echo=$ac_try;; +esac +eval ac_try_echo="\"\$as_me:$LINENO: $ac_try_echo\"" +$as_echo "$ac_try_echo") >&5 + (eval "$ac_link") 2>conftest.er1 + ac_status=$? 
+ grep -v '^ *+' conftest.er1 >conftest.err + rm -f conftest.er1 + cat conftest.err >&5 + $as_echo "$as_me:$LINENO: \$? = $ac_status" >&5 + (exit $ac_status); } && { + test -z "$ac_c_werror_flag" || + test ! -s conftest.err + } && test -s conftest$ac_exeext && { + test "$cross_compiling" = yes || + $as_test_x conftest$ac_exeext + }; then ac_cv_lib_resolv_hstrerror=yes else - ac_cv_lib_resolv_hstrerror=no + $as_echo "$as_me: failed program was:" >&5 +sed 's/^/| /' conftest.$ac_ext >&5 + + ac_cv_lib_resolv_hstrerror=no fi -rm -f core conftest.err conftest.$ac_objext \ - conftest$ac_exeext conftest.$ac_ext + +rm -rf conftest.dSYM +rm -f core conftest.err conftest.$ac_objext conftest_ipa8_conftest.oo \ + conftest$ac_exeext conftest.$ac_ext LIBS=$ac_check_lib_save_LIBS fi -{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_lib_resolv_hstrerror" >&5 +{ $as_echo "$as_me:$LINENO: result: $ac_cv_lib_resolv_hstrerror" >&5 $as_echo "$ac_cv_lib_resolv_hstrerror" >&6; } -if test "x$ac_cv_lib_resolv_hstrerror" = x""yes; then : +if test "x$ac_cv_lib_resolv_hstrerror" = x""yes; then cat >>confdefs.h <<_ACEOF #define HAVE_LIBRESOLV 1 _ACEOF LIBS="-lresolv $LIBS" -else - { $as_echo "$as_me:${as_lineno-$LINENO}: checking for gethostbyname in -lsocket" >&5 -$as_echo_n "checking for gethostbyname in -lsocket... " >&6; } -if test "${ac_cv_lib_socket_gethostbyname+set}" = set; then : +else + +{ $as_echo "$as_me:$LINENO: checking for gethostbyname in -lsocket" >&5 +$as_echo_n "checking for gethostbyname in -lsocket... " >&6; } +if test "${ac_cv_lib_socket_gethostbyname+set}" = set; then + $as_echo_n "(cached) " >&6 +else + ac_check_lib_save_LIBS=$LIBS +LIBS="-lsocket $LIBS" +cat >conftest.$ac_ext <<_ACEOF +/* confdefs.h. */ +_ACEOF +cat confdefs.h >>conftest.$ac_ext +cat >>conftest.$ac_ext <<_ACEOF +/* end confdefs.h. */ + +/* Override any GCC internal prototype to avoid an error. + Use char because int might match the return type of a GCC + builtin and then its argument prototype would still apply. */ +#ifdef __cplusplus +extern "C" +#endif +char gethostbyname (); +int +main () +{ +return gethostbyname (); + ; + return 0; +} +_ACEOF +rm -f conftest.$ac_objext conftest$ac_exeext +if { (ac_try="$ac_link" +case "(($ac_try" in + *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; + *) ac_try_echo=$ac_try;; +esac +eval ac_try_echo="\"\$as_me:$LINENO: $ac_try_echo\"" +$as_echo "$ac_try_echo") >&5 + (eval "$ac_link") 2>conftest.er1 + ac_status=$? + grep -v '^ *+' conftest.er1 >conftest.err + rm -f conftest.er1 + cat conftest.err >&5 + $as_echo "$as_me:$LINENO: \$? = $ac_status" >&5 + (exit $ac_status); } && { + test -z "$ac_c_werror_flag" || + test ! 
-s conftest.err + } && test -s conftest$ac_exeext && { + test "$cross_compiling" = yes || + $as_test_x conftest$ac_exeext + }; then + ac_cv_lib_socket_gethostbyname=yes +else + $as_echo "$as_me: failed program was:" >&5 +sed 's/^/| /' conftest.$ac_ext >&5 + + ac_cv_lib_socket_gethostbyname=no +fi + +rm -rf conftest.dSYM +rm -f core conftest.err conftest.$ac_objext conftest_ipa8_conftest.oo \ + conftest$ac_exeext conftest.$ac_ext +LIBS=$ac_check_lib_save_LIBS +fi +{ $as_echo "$as_me:$LINENO: result: $ac_cv_lib_socket_gethostbyname" >&5 +$as_echo "$ac_cv_lib_socket_gethostbyname" >&6; } +if test "x$ac_cv_lib_socket_gethostbyname" = x""yes; then + cat >>confdefs.h <<_ACEOF +#define HAVE_LIBSOCKET 1 +_ACEOF + + LIBS="-lsocket $LIBS" + +fi + +fi + +fi +done + + +for ac_func in getopt_long +do +as_ac_var=`$as_echo "ac_cv_func_$ac_func" | $as_tr_sh` +{ $as_echo "$as_me:$LINENO: checking for $ac_func" >&5 +$as_echo_n "checking for $ac_func... " >&6; } +if { as_var=$as_ac_var; eval "test \"\${$as_var+set}\" = set"; }; then $as_echo_n "(cached) " >&6 else - ac_check_lib_save_LIBS=$LIBS -LIBS="-lsocket $LIBS" -cat confdefs.h - <<_ACEOF >conftest.$ac_ext + cat >conftest.$ac_ext <<_ACEOF +/* confdefs.h. */ +_ACEOF +cat confdefs.h >>conftest.$ac_ext +cat >>conftest.$ac_ext <<_ACEOF /* end confdefs.h. */ +/* Define $ac_func to an innocuous variant, in case declares $ac_func. + For example, HP-UX 11i declares gettimeofday. */ +#define $ac_func innocuous_$ac_func + +/* System header to define __stub macros and hopefully few prototypes, + which can conflict with char $ac_func (); below. + Prefer to if __STDC__ is defined, since + exists even on freestanding compilers. */ + +#ifdef __STDC__ +# include +#else +# include +#endif + +#undef $ac_func /* Override any GCC internal prototype to avoid an error. Use char because int might match the return type of a GCC @@ -4692,58 +6540,80 @@ cat confdefs.h - <<_ACEOF >conftest.$ac_ext #ifdef __cplusplus extern "C" #endif -char gethostbyname (); +char $ac_func (); +/* The GNU C library defines this for functions which it implements + to always fail with ENOSYS. Some functions are actually named + something starting with __ and the normal name is an alias. */ +#if defined __stub_$ac_func || defined __stub___$ac_func +choke me +#endif + int main () { -return gethostbyname (); +return $ac_func (); ; return 0; } _ACEOF -if ac_fn_c_try_link "$LINENO"; then : - ac_cv_lib_socket_gethostbyname=yes +rm -f conftest.$ac_objext conftest$ac_exeext +if { (ac_try="$ac_link" +case "(($ac_try" in + *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; + *) ac_try_echo=$ac_try;; +esac +eval ac_try_echo="\"\$as_me:$LINENO: $ac_try_echo\"" +$as_echo "$ac_try_echo") >&5 + (eval "$ac_link") 2>conftest.er1 + ac_status=$? + grep -v '^ *+' conftest.er1 >conftest.err + rm -f conftest.er1 + cat conftest.err >&5 + $as_echo "$as_me:$LINENO: \$? = $ac_status" >&5 + (exit $ac_status); } && { + test -z "$ac_c_werror_flag" || + test ! 
-s conftest.err + } && test -s conftest$ac_exeext && { + test "$cross_compiling" = yes || + $as_test_x conftest$ac_exeext + }; then + eval "$as_ac_var=yes" else - ac_cv_lib_socket_gethostbyname=no -fi -rm -f core conftest.err conftest.$ac_objext \ - conftest$ac_exeext conftest.$ac_ext -LIBS=$ac_check_lib_save_LIBS -fi -{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_lib_socket_gethostbyname" >&5 -$as_echo "$ac_cv_lib_socket_gethostbyname" >&6; } -if test "x$ac_cv_lib_socket_gethostbyname" = x""yes; then : - cat >>confdefs.h <<_ACEOF -#define HAVE_LIBSOCKET 1 -_ACEOF - - LIBS="-lsocket $LIBS" - -fi + $as_echo "$as_me: failed program was:" >&5 +sed 's/^/| /' conftest.$ac_ext >&5 + eval "$as_ac_var=no" fi +rm -rf conftest.dSYM +rm -f core conftest.err conftest.$ac_objext conftest_ipa8_conftest.oo \ + conftest$ac_exeext conftest.$ac_ext fi -done - -for ac_func in getopt_long -do : - ac_fn_c_check_func "$LINENO" "getopt_long" "ac_cv_func_getopt_long" -if test "x$ac_cv_func_getopt_long" = x""yes; then : +ac_res=`eval 'as_val=${'$as_ac_var'} + $as_echo "$as_val"'` + { $as_echo "$as_me:$LINENO: result: $ac_res" >&5 +$as_echo "$ac_res" >&6; } +as_val=`eval 'as_val=${'$as_ac_var'} + $as_echo "$as_val"'` + if test "x$as_val" = x""yes; then cat >>confdefs.h <<_ACEOF -#define HAVE_GETOPT_LONG 1 +#define `$as_echo "HAVE_$ac_func" | $as_tr_cpp` 1 _ACEOF else #FreeBSD has a gnugetopt library for this - { $as_echo "$as_me:${as_lineno-$LINENO}: checking for getopt_long in -lgnugetopt" >&5 + { $as_echo "$as_me:$LINENO: checking for getopt_long in -lgnugetopt" >&5 $as_echo_n "checking for getopt_long in -lgnugetopt... " >&6; } -if test "${ac_cv_lib_gnugetopt_getopt_long+set}" = set; then : +if test "${ac_cv_lib_gnugetopt_getopt_long+set}" = set; then $as_echo_n "(cached) " >&6 else ac_check_lib_save_LIBS=$LIBS LIBS="-lgnugetopt $LIBS" -cat confdefs.h - <<_ACEOF >conftest.$ac_ext +cat >conftest.$ac_ext <<_ACEOF +/* confdefs.h. */ +_ACEOF +cat confdefs.h >>conftest.$ac_ext +cat >>conftest.$ac_ext <<_ACEOF /* end confdefs.h. */ /* Override any GCC internal prototype to avoid an error. @@ -4761,19 +6631,46 @@ return getopt_long (); return 0; } _ACEOF -if ac_fn_c_try_link "$LINENO"; then : +rm -f conftest.$ac_objext conftest$ac_exeext +if { (ac_try="$ac_link" +case "(($ac_try" in + *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; + *) ac_try_echo=$ac_try;; +esac +eval ac_try_echo="\"\$as_me:$LINENO: $ac_try_echo\"" +$as_echo "$ac_try_echo") >&5 + (eval "$ac_link") 2>conftest.er1 + ac_status=$? + grep -v '^ *+' conftest.er1 >conftest.err + rm -f conftest.er1 + cat conftest.err >&5 + $as_echo "$as_me:$LINENO: \$? = $ac_status" >&5 + (exit $ac_status); } && { + test -z "$ac_c_werror_flag" || + test ! 
-s conftest.err + } && test -s conftest$ac_exeext && { + test "$cross_compiling" = yes || + $as_test_x conftest$ac_exeext + }; then ac_cv_lib_gnugetopt_getopt_long=yes else - ac_cv_lib_gnugetopt_getopt_long=no + $as_echo "$as_me: failed program was:" >&5 +sed 's/^/| /' conftest.$ac_ext >&5 + + ac_cv_lib_gnugetopt_getopt_long=no fi -rm -f core conftest.err conftest.$ac_objext \ - conftest$ac_exeext conftest.$ac_ext + +rm -rf conftest.dSYM +rm -f core conftest.err conftest.$ac_objext conftest_ipa8_conftest.oo \ + conftest$ac_exeext conftest.$ac_ext LIBS=$ac_check_lib_save_LIBS fi -{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_lib_gnugetopt_getopt_long" >&5 +{ $as_echo "$as_me:$LINENO: result: $ac_cv_lib_gnugetopt_getopt_long" >&5 $as_echo "$ac_cv_lib_gnugetopt_getopt_long" >&6; } -if test "x$ac_cv_lib_gnugetopt_getopt_long" = x""yes; then : - LIBS="-lgnugetopt $LIBS";$as_echo "#define HAVE_GETOPT_LONG 1" >>confdefs.h +if test "x$ac_cv_lib_gnugetopt_getopt_long" = x""yes; then + LIBS="-lgnugetopt $LIBS";cat >>confdefs.h <<\_ACEOF +#define HAVE_GETOPT_LONG 1 +_ACEOF fi @@ -4810,13 +6707,13 @@ _ACEOF case $ac_val in #( *${as_nl}*) case $ac_var in #( - *_cv_*) { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: cache variable $ac_var contains a newline" >&5 + *_cv_*) { $as_echo "$as_me:$LINENO: WARNING: cache variable $ac_var contains a newline" >&5 $as_echo "$as_me: WARNING: cache variable $ac_var contains a newline" >&2;} ;; esac case $ac_var in #( _ | IFS | as_nl) ;; #( BASH_ARGV | BASH_SOURCE) eval $ac_var= ;; #( - *) { eval $ac_var=; unset $ac_var;} ;; + *) $as_unset $ac_var ;; esac ;; esac done @@ -4824,8 +6721,8 @@ $as_echo "$as_me: WARNING: cache variable $ac_var contains a newline" >&2;} ;; (set) 2>&1 | case $as_nl`(ac_space=' '; set) 2>&1` in #( *${as_nl}ac_space=\ *) - # `set' does not quote correctly, so add quotes: double-quote - # substitution turns \\\\ into \\, and sed turns \\ into \. + # `set' does not quote correctly, so add quotes (double-quote + # substitution turns \\\\ into \\, and sed turns \\ into \). sed -n \ "s/'/'\\\\''/g; s/^\\([_$as_cr_alnum]*_cv_[_$as_cr_alnum]*\\)=\\(.*\\)/\\1='\\2'/p" @@ -4848,11 +6745,11 @@ $as_echo "$as_me: WARNING: cache variable $ac_var contains a newline" >&2;} ;; if diff "$cache_file" confcache >/dev/null 2>&1; then :; else if test -w "$cache_file"; then test "x$cache_file" != "x/dev/null" && - { $as_echo "$as_me:${as_lineno-$LINENO}: updating cache $cache_file" >&5 + { $as_echo "$as_me:$LINENO: updating cache $cache_file" >&5 $as_echo "$as_me: updating cache $cache_file" >&6;} cat confcache >$cache_file else - { $as_echo "$as_me:${as_lineno-$LINENO}: not updating unwritable cache $cache_file" >&5 + { $as_echo "$as_me:$LINENO: not updating unwritable cache $cache_file" >&5 $as_echo "$as_me: not updating unwritable cache $cache_file" >&6;} fi fi @@ -4872,8 +6769,8 @@ for ac_i in : $LIBOBJS; do test "x$ac_i" = x: && continue ac_i=`$as_echo "$ac_i" | sed "$ac_script"` # 2. Prepend LIBOBJDIR. When used with automake>=1.10 LIBOBJDIR # will be set to the directory where LIBOBJS objects are built. 
- as_fn_append ac_libobjs " \${LIBOBJDIR}$ac_i\$U.$ac_objext" - as_fn_append ac_ltlibobjs " \${LIBOBJDIR}$ac_i"'$U.lo' + ac_libobjs="$ac_libobjs \${LIBOBJDIR}$ac_i\$U.$ac_objext" + ac_ltlibobjs="$ac_ltlibobjs \${LIBOBJDIR}$ac_i"'$U.lo' done LIBOBJS=$ac_libobjs @@ -4885,10 +6782,9 @@ LTLIBOBJS=$ac_ltlibobjs ac_write_fail=0 ac_clean_files_save=$ac_clean_files ac_clean_files="$ac_clean_files $CONFIG_STATUS" -{ $as_echo "$as_me:${as_lineno-$LINENO}: creating $CONFIG_STATUS" >&5 +{ $as_echo "$as_me:$LINENO: creating $CONFIG_STATUS" >&5 $as_echo "$as_me: creating $CONFIG_STATUS" >&6;} -as_write_fail=0 -cat >$CONFIG_STATUS <<_ASEOF || as_write_fail=1 +cat >$CONFIG_STATUS <<_ACEOF || ac_write_fail=1 #! $SHELL # Generated by $as_me. # Run this file to recreate the current configuration. @@ -4898,18 +6794,17 @@ cat >$CONFIG_STATUS <<_ASEOF || as_write_fail=1 debug=false ac_cs_recheck=false ac_cs_silent=false - SHELL=\${CONFIG_SHELL-$SHELL} -export SHELL -_ASEOF -cat >>$CONFIG_STATUS <<\_ASEOF || as_write_fail=1 -## -------------------- ## -## M4sh Initialization. ## -## -------------------- ## +_ACEOF + +cat >>$CONFIG_STATUS <<\_ACEOF || ac_write_fail=1 +## --------------------- ## +## M4sh Initialization. ## +## --------------------- ## # Be more Bourne compatible DUALCASE=1; export DUALCASE # for MKS sh -if test -n "${ZSH_VERSION+set}" && (emulate sh) >/dev/null 2>&1; then : +if test -n "${ZSH_VERSION+set}" && (emulate sh) >/dev/null 2>&1; then emulate sh NULLCMD=: # Pre-4.2 versions of Zsh do word splitting on ${1+"$@"}, which @@ -4917,15 +6812,23 @@ if test -n "${ZSH_VERSION+set}" && (emulate sh) >/dev/null 2>&1; then : alias -g '${1+"$@"}'='"$@"' setopt NO_GLOB_SUBST else - case `(set -o) 2>/dev/null` in #( - *posix*) : - set -o posix ;; #( - *) : - ;; + case `(set -o) 2>/dev/null` in + *posix*) set -o posix ;; esac + fi + + +# PATH needs CR +# Avoid depending upon Character Ranges. +as_cr_letters='abcdefghijklmnopqrstuvwxyz' +as_cr_LETTERS='ABCDEFGHIJKLMNOPQRSTUVWXYZ' +as_cr_Letters=$as_cr_letters$as_cr_LETTERS +as_cr_digits='0123456789' +as_cr_alnum=$as_cr_Letters$as_cr_digits + as_nl=' ' export as_nl @@ -4933,13 +6836,7 @@ export as_nl as_echo='\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\' as_echo=$as_echo$as_echo$as_echo$as_echo$as_echo as_echo=$as_echo$as_echo$as_echo$as_echo$as_echo$as_echo -# Prefer a ksh shell builtin over an external printf program on Solaris, -# but without wasting forks for bash or zsh. -if test -z "$BASH_VERSION$ZSH_VERSION" \ - && (test "X`print -r -- $as_echo`" = "X$as_echo") 2>/dev/null; then - as_echo='print -r --' - as_echo_n='print -rn --' -elif (test "X`printf %s $as_echo`" = "X$as_echo") 2>/dev/null; then +if (test "X`printf %s $as_echo`" = "X$as_echo") 2>/dev/null; then as_echo='printf %s\n' as_echo_n='printf %s' else @@ -4950,7 +6847,7 @@ else as_echo_body='eval expr "X$1" : "X\\(.*\\)"' as_echo_n_body='eval arg=$1; - case $arg in #( + case $arg in *"$as_nl"*) expr "X$arg" : "X\\(.*\\)$as_nl"; arg=`expr "X$arg" : ".*$as_nl\\(.*\\)"`;; @@ -4973,6 +6870,13 @@ if test "${PATH_SEPARATOR+set}" != set; then } fi +# Support unset when possible. +if ( (MAIL=60; unset MAIL) || exit) >/dev/null 2>&1; then + as_unset=unset +else + as_unset=false +fi + # IFS # We need space, tab and new line, in precisely that order. Quoting is @@ -4982,15 +6886,15 @@ fi IFS=" "" $as_nl" # Find who we are. Look in the path if we contain no directory separator. 
-case $0 in #(( +case $0 in *[\\/]* ) as_myself=$0 ;; *) as_save_IFS=$IFS; IFS=$PATH_SEPARATOR for as_dir in $PATH do IFS=$as_save_IFS test -z "$as_dir" && as_dir=. - test -r "$as_dir/$0" && as_myself=$as_dir/$0 && break - done + test -r "$as_dir/$0" && as_myself=$as_dir/$0 && break +done IFS=$as_save_IFS ;; @@ -5002,16 +6906,12 @@ if test "x$as_myself" = x; then fi if test ! -f "$as_myself"; then $as_echo "$as_myself: error: cannot find myself; rerun with an absolute file name" >&2 - exit 1 + { (exit 1); exit 1; } fi -# Unset variables that we do not need and which cause bugs (e.g. in -# pre-3.0 UWIN ksh). But do not cause bugs in bash 2.01; the "|| exit 1" -# suppresses any "Segmentation fault" message there. '((' could -# trigger a bug in pdksh 5.2.14. -for as_var in BASH_ENV ENV MAIL MAILPATH -do eval test x\${$as_var+set} = xset \ - && ( (unset $as_var) || exit 1) >/dev/null 2>&1 && unset $as_var || : +# Work around bugs in pre-3.0 UWIN ksh. +for as_var in ENV MAIL MAILPATH +do ($as_unset $as_var) >/dev/null 2>&1 && $as_unset $as_var done PS1='$ ' PS2='> ' @@ -5023,89 +6923,7 @@ export LC_ALL LANGUAGE=C export LANGUAGE -# CDPATH. -(unset CDPATH) >/dev/null 2>&1 && unset CDPATH - - -# as_fn_error ERROR [LINENO LOG_FD] -# --------------------------------- -# Output "`basename $0`: error: ERROR" to stderr. If LINENO and LOG_FD are -# provided, also output the error to LOG_FD, referencing LINENO. Then exit the -# script with status $?, using 1 if that was 0. -as_fn_error () -{ - as_status=$?; test $as_status -eq 0 && as_status=1 - if test "$3"; then - as_lineno=${as_lineno-"$2"} as_lineno_stack=as_lineno_stack=$as_lineno_stack - $as_echo "$as_me:${as_lineno-$LINENO}: error: $1" >&$3 - fi - $as_echo "$as_me: error: $1" >&2 - as_fn_exit $as_status -} # as_fn_error - - -# as_fn_set_status STATUS -# ----------------------- -# Set $? to STATUS, without forking. -as_fn_set_status () -{ - return $1 -} # as_fn_set_status - -# as_fn_exit STATUS -# ----------------- -# Exit the shell with STATUS, even in a "trap 0" or "set -e" context. -as_fn_exit () -{ - set +e - as_fn_set_status $1 - exit $1 -} # as_fn_exit - -# as_fn_unset VAR -# --------------- -# Portably unset VAR. -as_fn_unset () -{ - { eval $1=; unset $1;} -} -as_unset=as_fn_unset -# as_fn_append VAR VALUE -# ---------------------- -# Append the text in VALUE to the end of the definition contained in VAR. Take -# advantage of any shell optimizations that allow amortized linear growth over -# repeated appends, instead of the typical quadratic growth present in naive -# implementations. -if (eval "as_var=1; as_var+=2; test x\$as_var = x12") 2>/dev/null; then : - eval 'as_fn_append () - { - eval $1+=\$2 - }' -else - as_fn_append () - { - eval $1=\$$1\$2 - } -fi # as_fn_append - -# as_fn_arith ARG... -# ------------------ -# Perform arithmetic evaluation on the ARGs, and store the result in the -# global $as_val. Take advantage of shells that can avoid forks. The arguments -# must be portable across $(()) and expr. -if (eval "test \$(( 1 + 1 )) = 2") 2>/dev/null; then : - eval 'as_fn_arith () - { - as_val=$(( $* )) - }' -else - as_fn_arith () - { - as_val=`expr "$@" || test $? -eq 1` - } -fi # as_fn_arith - - +# Required to use basename. if expr a : '\(a\)' >/dev/null 2>&1 && test "X`expr 00001 : '.*\(...\)'`" = X001; then as_expr=expr @@ -5119,12 +6937,8 @@ else as_basename=false fi -if (as_dir=`dirname -- /` && test "X$as_dir" = X/) >/dev/null 2>&1; then - as_dirname=dirname -else - as_dirname=false -fi +# Name of the executable. 
as_me=`$as_basename -- "$0" || $as_expr X/"$0" : '.*/\([^/][^/]*\)/*$' \| \ X"$0" : 'X\(//\)$' \| \ @@ -5144,25 +6958,76 @@ $as_echo X/"$0" | } s/.*/./; q'` -# Avoid depending upon Character Ranges. -as_cr_letters='abcdefghijklmnopqrstuvwxyz' -as_cr_LETTERS='ABCDEFGHIJKLMNOPQRSTUVWXYZ' -as_cr_Letters=$as_cr_letters$as_cr_LETTERS -as_cr_digits='0123456789' -as_cr_alnum=$as_cr_Letters$as_cr_digits +# CDPATH. +$as_unset CDPATH + + + + as_lineno_1=$LINENO + as_lineno_2=$LINENO + test "x$as_lineno_1" != "x$as_lineno_2" && + test "x`expr $as_lineno_1 + 1`" = "x$as_lineno_2" || { + + # Create $as_me.lineno as a copy of $as_myself, but with $LINENO + # uniformly replaced by the line number. The first 'sed' inserts a + # line-number line after each line using $LINENO; the second 'sed' + # does the real work. The second script uses 'N' to pair each + # line-number line with the line containing $LINENO, and appends + # trailing '-' during substitution so that $LINENO is not a special + # case at line end. + # (Raja R Harinath suggested sed '=', and Paul Eggert wrote the + # scripts with optimization help from Paolo Bonzini. Blame Lee + # E. McMahon (1931-1989) for sed's syntax. :-) + sed -n ' + p + /[$]LINENO/= + ' <$as_myself | + sed ' + s/[$]LINENO.*/&-/ + t lineno + b + :lineno + N + :loop + s/[$]LINENO\([^'$as_cr_alnum'_].*\n\)\(.*\)/\2\1\2/ + t loop + s/-\n.*// + ' >$as_me.lineno && + chmod +x "$as_me.lineno" || + { $as_echo "$as_me: error: cannot create $as_me.lineno; rerun with a POSIX shell" >&2 + { (exit 1); exit 1; }; } + + # Don't try to exec as it changes $[0], causing all sort of problems + # (the dirname of $[0] is not the place where we might find the + # original and so on. Autoconf is especially sensitive to this). + . "./$as_me.lineno" + # Exit status is that of the last command. + exit +} + + +if (as_dir=`dirname -- /` && test "X$as_dir" = X/) >/dev/null 2>&1; then + as_dirname=dirname +else + as_dirname=false +fi ECHO_C= ECHO_N= ECHO_T= -case `echo -n x` in #((((( +case `echo -n x` in -n*) - case `echo 'xy\c'` in + case `echo 'x\c'` in *c*) ECHO_T=' ';; # ECHO_T is single tab character. - xy) ECHO_C='\c';; - *) echo `echo ksh88 bug on AIX 6.1` > /dev/null - ECHO_T=' ';; + *) ECHO_C='\c';; esac;; *) ECHO_N='-n';; esac +if expr a : '\(a\)' >/dev/null 2>&1 && + test "X`expr 00001 : '.*\(...\)'`" = X001; then + as_expr=expr +else + as_expr=false +fi rm -f conf$$ conf$$.exe conf$$.file if test -d conf$$.dir; then @@ -5191,56 +7056,8 @@ fi rm -f conf$$ conf$$.exe conf$$.dir/conf$$.file conf$$.file rmdir conf$$.dir 2>/dev/null - -# as_fn_mkdir_p -# ------------- -# Create "$as_dir" as a directory, including parents if necessary. -as_fn_mkdir_p () -{ - - case $as_dir in #( - -*) as_dir=./$as_dir;; - esac - test -d "$as_dir" || eval $as_mkdir_p || { - as_dirs= - while :; do - case $as_dir in #( - *\'*) as_qdir=`$as_echo "$as_dir" | sed "s/'/'\\\\\\\\''/g"`;; #'( - *) as_qdir=$as_dir;; - esac - as_dirs="'$as_qdir' $as_dirs" - as_dir=`$as_dirname -- "$as_dir" || -$as_expr X"$as_dir" : 'X\(.*[^/]\)//*[^/][^/]*/*$' \| \ - X"$as_dir" : 'X\(//\)[^/]' \| \ - X"$as_dir" : 'X\(//\)$' \| \ - X"$as_dir" : 'X\(/\)' \| . 
2>/dev/null || -$as_echo X"$as_dir" | - sed '/^X\(.*[^/]\)\/\/*[^/][^/]*\/*$/{ - s//\1/ - q - } - /^X\(\/\/\)[^/].*/{ - s//\1/ - q - } - /^X\(\/\/\)$/{ - s//\1/ - q - } - /^X\(\/\).*/{ - s//\1/ - q - } - s/.*/./; q'` - test -d "$as_dir" && break - done - test -z "$as_dirs" || eval "mkdir $as_dirs" - } || test -d "$as_dir" || as_fn_error "cannot create directory $as_dir" - - -} # as_fn_mkdir_p if mkdir -p . 2>/dev/null; then - as_mkdir_p='mkdir -p "$as_dir"' + as_mkdir_p=: else test -d ./-p && rmdir ./-p as_mkdir_p=false @@ -5259,10 +7076,10 @@ else if test -d "$1"; then test -d "$1/."; else - case $1 in #( + case $1 in -*)set "./$1";; esac; - case `ls -ld'$as_ls_L_option' "$1" 2>/dev/null` in #(( + case `ls -ld'$as_ls_L_option' "$1" 2>/dev/null` in ???[sx]*):;;*)false;;esac;fi '\'' sh ' @@ -5277,19 +7094,13 @@ as_tr_sh="eval sed 'y%*+%pp%;s%[^_$as_cr_alnum]%_%g'" exec 6>&1 -## ----------------------------------- ## -## Main body of $CONFIG_STATUS script. ## -## ----------------------------------- ## -_ASEOF -test $as_write_fail = 0 && chmod +x $CONFIG_STATUS || ac_write_fail=1 -cat >>$CONFIG_STATUS <<\_ACEOF || ac_write_fail=1 -# Save the log message, to keep $0 and so on meaningful, and to +# Save the log message, to keep $[0] and so on meaningful, and to # report actual input values of CONFIG_FILES etc. instead of their # values after options handling. ac_log=" This file was extended by libipfix $as_me 0.8.2 , which was -generated by GNU Autoconf 2.65. Invocation command line was +generated by GNU Autoconf 2.63. Invocation command line was CONFIG_FILES = $CONFIG_FILES CONFIG_HEADERS = $CONFIG_HEADERS @@ -5320,15 +7131,13 @@ _ACEOF cat >>$CONFIG_STATUS <<\_ACEOF || ac_write_fail=1 ac_cs_usage="\ -\`$as_me' instantiates files and other configuration actions -from templates according to the current configuration. Unless the files -and actions are specified as TAGs, all are instantiated by default. +\`$as_me' instantiates files from templates according to the +current configuration. -Usage: $0 [OPTION]... [TAG]... +Usage: $0 [OPTION]... [FILE]... -h, --help print this help, then exit -V, --version print version number and configuration settings, then exit - --config print configuration, then exit -q, --quiet, --silent do not print progress messages -d, --debug don't remove temporary files @@ -5344,17 +7153,16 @@ $config_files Configuration headers: $config_headers -Report bugs to the package provider." +Report bugs to ." _ACEOF cat >>$CONFIG_STATUS <<_ACEOF || ac_write_fail=1 -ac_cs_config="`$as_echo "$ac_configure_args" | sed 's/^ //; s/[\\""\`\$]/\\\\&/g'`" ac_cs_version="\\ libipfix config.status 0.8.2 -configured by $0, generated by GNU Autoconf 2.65, - with options \\"\$ac_cs_config\\" +configured by $0, generated by GNU Autoconf 2.63, + with options \\"`$as_echo "$ac_configure_args" | sed 's/^ //; s/[\\""\`\$]/\\\\&/g'`\\" -Copyright (C) 2009 Free Software Foundation, Inc. +Copyright (C) 2008 Free Software Foundation, Inc. This config.status script is free software; the Free Software Foundation gives unlimited permission to copy, distribute and modify it." 
@@ -5388,8 +7196,6 @@ do ac_cs_recheck=: ;; --version | --versio | --versi | --vers | --ver | --ve | --v | -V ) $as_echo "$ac_cs_version"; exit ;; - --config | --confi | --conf | --con | --co | --c ) - $as_echo "$ac_cs_config"; exit ;; --debug | --debu | --deb | --de | --d | -d ) debug=: ;; --file | --fil | --fi | --f ) @@ -5397,19 +7203,20 @@ do case $ac_optarg in *\'*) ac_optarg=`$as_echo "$ac_optarg" | sed "s/'/'\\\\\\\\''/g"` ;; esac - as_fn_append CONFIG_FILES " '$ac_optarg'" + CONFIG_FILES="$CONFIG_FILES '$ac_optarg'" ac_need_defaults=false;; --header | --heade | --head | --hea ) $ac_shift case $ac_optarg in *\'*) ac_optarg=`$as_echo "$ac_optarg" | sed "s/'/'\\\\\\\\''/g"` ;; esac - as_fn_append CONFIG_HEADERS " '$ac_optarg'" + CONFIG_HEADERS="$CONFIG_HEADERS '$ac_optarg'" ac_need_defaults=false;; --he | --h) # Conflict between --help and --header - as_fn_error "ambiguous option: \`$1' -Try \`$0 --help' for more information.";; + { $as_echo "$as_me: error: ambiguous option: $1 +Try \`$0 --help' for more information." >&2 + { (exit 1); exit 1; }; };; --help | --hel | -h ) $as_echo "$ac_cs_usage"; exit ;; -q | -quiet | --quiet | --quie | --qui | --qu | --q \ @@ -5417,10 +7224,11 @@ Try \`$0 --help' for more information.";; ac_cs_silent=: ;; # This is an error. - -*) as_fn_error "unrecognized option: \`$1' -Try \`$0 --help' for more information." ;; + -*) { $as_echo "$as_me: error: unrecognized option: $1 +Try \`$0 --help' for more information." >&2 + { (exit 1); exit 1; }; } ;; - *) as_fn_append ac_config_targets " $1" + *) ac_config_targets="$ac_config_targets $1" ac_need_defaults=false ;; esac @@ -5475,7 +7283,9 @@ do "collector/Makefile") CONFIG_FILES="$CONFIG_FILES collector/Makefile" ;; "libipfix.pc") CONFIG_FILES="$CONFIG_FILES libipfix.pc" ;; - *) as_fn_error "invalid argument: \`$ac_config_target'" "$LINENO" 5;; + *) { { $as_echo "$as_me:$LINENO: error: invalid argument: $ac_config_target" >&5 +$as_echo "$as_me: error: invalid argument: $ac_config_target" >&2;} + { (exit 1); exit 1; }; };; esac done @@ -5501,7 +7311,7 @@ $debug || trap 'exit_status=$? { test -z "$tmp" || test ! -d "$tmp" || rm -fr "$tmp"; } && exit $exit_status ' 0 - trap 'as_fn_exit 1' 1 2 13 15 + trap '{ (exit 1); exit 1; }' 1 2 13 15 } # Create a (secure) tmp directory for tmp files. @@ -5512,7 +7322,11 @@ $debug || { tmp=./conf$$-$RANDOM (umask 077 && mkdir "$tmp") -} || as_fn_error "cannot create a temporary directory in ." "$LINENO" 5 +} || +{ + $as_echo "$as_me: cannot create a temporary directory in ." >&2 + { (exit 1); exit 1; } +} # Set up the scripts for CONFIG_FILES section. # No need to generate them if there are no CONFIG_FILES. @@ -5520,16 +7334,10 @@ $debug || if test -n "$CONFIG_FILES"; then -ac_cr=`echo X | tr X '\015'` -# On cygwin, bash can eat \r inside `` if the user requested igncr. -# But we know of no other shell where ac_cr would be empty at this -# point, so we can use a bashism as a fallback. 
-if test "x$ac_cr" = x; then - eval ac_cr=\$\'\\r\' -fi +ac_cr=' ' ac_cs_awk_cr=`$AWK 'BEGIN { print "a\rb" }' /dev/null` if test "$ac_cs_awk_cr" = "a${ac_cr}b"; then - ac_cs_awk_cr='\r' + ac_cs_awk_cr='\\r' else ac_cs_awk_cr=$ac_cr fi @@ -5543,18 +7351,24 @@ _ACEOF echo "$ac_subst_vars" | sed 's/.*/&!$&$ac_delim/' && echo "_ACEOF" } >conf$$subs.sh || - as_fn_error "could not make $CONFIG_STATUS" "$LINENO" 5 + { { $as_echo "$as_me:$LINENO: error: could not make $CONFIG_STATUS" >&5 +$as_echo "$as_me: error: could not make $CONFIG_STATUS" >&2;} + { (exit 1); exit 1; }; } ac_delim_num=`echo "$ac_subst_vars" | grep -c '$'` ac_delim='%!_!# ' for ac_last_try in false false false false false :; do . ./conf$$subs.sh || - as_fn_error "could not make $CONFIG_STATUS" "$LINENO" 5 + { { $as_echo "$as_me:$LINENO: error: could not make $CONFIG_STATUS" >&5 +$as_echo "$as_me: error: could not make $CONFIG_STATUS" >&2;} + { (exit 1); exit 1; }; } ac_delim_n=`sed -n "s/.*$ac_delim\$/X/p" conf$$subs.awk | grep -c X` if test $ac_delim_n = $ac_delim_num; then break elif $ac_last_try; then - as_fn_error "could not make $CONFIG_STATUS" "$LINENO" 5 + { { $as_echo "$as_me:$LINENO: error: could not make $CONFIG_STATUS" >&5 +$as_echo "$as_me: error: could not make $CONFIG_STATUS" >&2;} + { (exit 1); exit 1; }; } else ac_delim="$ac_delim!$ac_delim _$ac_delim!! " fi @@ -5576,7 +7390,7 @@ s/'"$ac_delim"'$// t delim :nl h -s/\(.\{148\}\)..*/\1/ +s/\(.\{148\}\).*/\1/ t more1 s/["\\]/\\&/g; s/^/"/; s/$/\\n"\\/ p @@ -5590,7 +7404,7 @@ s/.\{148\}// t nl :delim h -s/\(.\{148\}\)..*/\1/ +s/\(.\{148\}\).*/\1/ t more2 s/["\\]/\\&/g; s/^/"/; s/$/"/ p @@ -5643,7 +7457,9 @@ if sed "s/$ac_cr//" < /dev/null > /dev/null 2>&1; then else cat fi < "$tmp/subs1.awk" > "$tmp/subs.awk" \ - || as_fn_error "could not setup config files machinery" "$LINENO" 5 + || { { $as_echo "$as_me:$LINENO: error: could not setup config files machinery" >&5 +$as_echo "$as_me: error: could not setup config files machinery" >&2;} + { (exit 1); exit 1; }; } _ACEOF # VPATH may cause trouble with some makes, so we remove $(srcdir), @@ -5684,7 +7500,9 @@ for ac_last_try in false false :; do if test -z "$ac_t"; then break elif $ac_last_try; then - as_fn_error "could not make $CONFIG_HEADERS" "$LINENO" 5 + { { $as_echo "$as_me:$LINENO: error: could not make $CONFIG_HEADERS" >&5 +$as_echo "$as_me: error: could not make $CONFIG_HEADERS" >&2;} + { (exit 1); exit 1; }; } else ac_delim="$ac_delim!$ac_delim _$ac_delim!! 
" fi @@ -5769,7 +7587,9 @@ cat >>$CONFIG_STATUS <<_ACEOF || ac_write_fail=1 _ACAWK _ACEOF cat >>$CONFIG_STATUS <<\_ACEOF || ac_write_fail=1 - as_fn_error "could not setup config headers machinery" "$LINENO" 5 + { { $as_echo "$as_me:$LINENO: error: could not setup config headers machinery" >&5 +$as_echo "$as_me: error: could not setup config headers machinery" >&2;} + { (exit 1); exit 1; }; } fi # test -n "$CONFIG_HEADERS" @@ -5782,7 +7602,9 @@ do esac case $ac_mode$ac_tag in :[FHL]*:*);; - :L* | :C*:*) as_fn_error "invalid tag \`$ac_tag'" "$LINENO" 5;; + :L* | :C*:*) { { $as_echo "$as_me:$LINENO: error: invalid tag $ac_tag" >&5 +$as_echo "$as_me: error: invalid tag $ac_tag" >&2;} + { (exit 1); exit 1; }; };; :[FH]-) ac_tag=-:-;; :[FH]*) ac_tag=$ac_tag:$ac_tag.in;; esac @@ -5810,10 +7632,12 @@ do [\\/$]*) false;; *) test -f "$srcdir/$ac_f" && ac_f="$srcdir/$ac_f";; esac || - as_fn_error "cannot find input file: \`$ac_f'" "$LINENO" 5;; + { { $as_echo "$as_me:$LINENO: error: cannot find input file: $ac_f" >&5 +$as_echo "$as_me: error: cannot find input file: $ac_f" >&2;} + { (exit 1); exit 1; }; };; esac case $ac_f in *\'*) ac_f=`$as_echo "$ac_f" | sed "s/'/'\\\\\\\\''/g"`;; esac - as_fn_append ac_file_inputs " '$ac_f'" + ac_file_inputs="$ac_file_inputs '$ac_f'" done # Let's still pretend it is `configure' which instantiates (i.e., don't @@ -5824,7 +7648,7 @@ do `' by configure.' if test x"$ac_file" != x-; then configure_input="$ac_file. $configure_input" - { $as_echo "$as_me:${as_lineno-$LINENO}: creating $ac_file" >&5 + { $as_echo "$as_me:$LINENO: creating $ac_file" >&5 $as_echo "$as_me: creating $ac_file" >&6;} fi # Neutralize special characters interpreted by sed in replacement strings. @@ -5837,7 +7661,9 @@ $as_echo "$as_me: creating $ac_file" >&6;} case $ac_tag in *:-:* | *:-) cat >"$tmp/stdin" \ - || as_fn_error "could not create $ac_file" "$LINENO" 5 ;; + || { { $as_echo "$as_me:$LINENO: error: could not create $ac_file" >&5 +$as_echo "$as_me: error: could not create $ac_file" >&2;} + { (exit 1); exit 1; }; } ;; esac ;; esac @@ -5865,7 +7691,47 @@ $as_echo X"$ac_file" | q } s/.*/./; q'` - as_dir="$ac_dir"; as_fn_mkdir_p + { as_dir="$ac_dir" + case $as_dir in #( + -*) as_dir=./$as_dir;; + esac + test -d "$as_dir" || { $as_mkdir_p && mkdir -p "$as_dir"; } || { + as_dirs= + while :; do + case $as_dir in #( + *\'*) as_qdir=`$as_echo "$as_dir" | sed "s/'/'\\\\\\\\''/g"`;; #'( + *) as_qdir=$as_dir;; + esac + as_dirs="'$as_qdir' $as_dirs" + as_dir=`$as_dirname -- "$as_dir" || +$as_expr X"$as_dir" : 'X\(.*[^/]\)//*[^/][^/]*/*$' \| \ + X"$as_dir" : 'X\(//\)[^/]' \| \ + X"$as_dir" : 'X\(//\)$' \| \ + X"$as_dir" : 'X\(/\)' \| . 2>/dev/null || +$as_echo X"$as_dir" | + sed '/^X\(.*[^/]\)\/\/*[^/][^/]*\/*$/{ + s//\1/ + q + } + /^X\(\/\/\)[^/].*/{ + s//\1/ + q + } + /^X\(\/\/\)$/{ + s//\1/ + q + } + /^X\(\/\).*/{ + s//\1/ + q + } + s/.*/./; q'` + test -d "$as_dir" && break + done + test -z "$as_dirs" || eval "mkdir $as_dirs" + } || test -d "$as_dir" || { { $as_echo "$as_me:$LINENO: error: cannot create directory $as_dir" >&5 +$as_echo "$as_me: error: cannot create directory $as_dir" >&2;} + { (exit 1); exit 1; }; }; } ac_builddir=. case "$ac_dir" in @@ -5917,6 +7783,7 @@ cat >>$CONFIG_STATUS <<\_ACEOF || ac_write_fail=1 # If the template does not know about datarootdir, expand it. # FIXME: This hack should be removed a few years after 2.60. 
ac_datarootdir_hack=; ac_datarootdir_seen= + ac_sed_dataroot=' /datarootdir/ { p @@ -5926,11 +7793,12 @@ ac_sed_dataroot=' /@docdir@/p /@infodir@/p /@localedir@/p -/@mandir@/p' +/@mandir@/p +' case `eval "sed -n \"\$ac_sed_dataroot\" $ac_file_inputs"` in *datarootdir*) ac_datarootdir_seen=yes;; *@datadir@*|*@docdir@*|*@infodir@*|*@localedir@*|*@mandir@*) - { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: $ac_file_inputs seems to ignore the --datarootdir setting" >&5 + { $as_echo "$as_me:$LINENO: WARNING: $ac_file_inputs seems to ignore the --datarootdir setting" >&5 $as_echo "$as_me: WARNING: $ac_file_inputs seems to ignore the --datarootdir setting" >&2;} _ACEOF cat >>$CONFIG_STATUS <<_ACEOF || ac_write_fail=1 @@ -5940,7 +7808,7 @@ cat >>$CONFIG_STATUS <<_ACEOF || ac_write_fail=1 s&@infodir@&$infodir&g s&@localedir@&$localedir&g s&@mandir@&$mandir&g - s&\\\${datarootdir}&$datarootdir&g' ;; + s&\\\${datarootdir}&$datarootdir&g' ;; esac _ACEOF @@ -5968,12 +7836,14 @@ s&@INSTALL@&$ac_INSTALL&;t t $ac_datarootdir_hack " eval sed \"\$ac_sed_extra\" "$ac_file_inputs" | $AWK -f "$tmp/subs.awk" >$tmp/out \ - || as_fn_error "could not create $ac_file" "$LINENO" 5 + || { { $as_echo "$as_me:$LINENO: error: could not create $ac_file" >&5 +$as_echo "$as_me: error: could not create $ac_file" >&2;} + { (exit 1); exit 1; }; } test -z "$ac_datarootdir_hack$ac_datarootdir_seen" && { ac_out=`sed -n '/\${datarootdir}/p' "$tmp/out"`; test -n "$ac_out"; } && { ac_out=`sed -n '/^[ ]*datarootdir[ ]*:*=/p' "$tmp/out"`; test -z "$ac_out"; } && - { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: $ac_file contains a reference to the variable \`datarootdir' + { $as_echo "$as_me:$LINENO: WARNING: $ac_file contains a reference to the variable \`datarootdir' which seems to be undefined. Please make sure it is defined." >&5 $as_echo "$as_me: WARNING: $ac_file contains a reference to the variable \`datarootdir' which seems to be undefined. Please make sure it is defined." >&2;} @@ -5983,7 +7853,9 @@ which seems to be undefined. Please make sure it is defined." >&2;} -) cat "$tmp/out" && rm -f "$tmp/out";; *) rm -f "$ac_file" && mv "$tmp/out" "$ac_file";; esac \ - || as_fn_error "could not create $ac_file" "$LINENO" 5 + || { { $as_echo "$as_me:$LINENO: error: could not create $ac_file" >&5 +$as_echo "$as_me: error: could not create $ac_file" >&2;} + { (exit 1); exit 1; }; } ;; :H) # @@ -5994,19 +7866,25 @@ which seems to be undefined. Please make sure it is defined." 
>&2;} $as_echo "/* $configure_input */" \ && eval '$AWK -f "$tmp/defines.awk"' "$ac_file_inputs" } >"$tmp/config.h" \ - || as_fn_error "could not create $ac_file" "$LINENO" 5 + || { { $as_echo "$as_me:$LINENO: error: could not create $ac_file" >&5 +$as_echo "$as_me: error: could not create $ac_file" >&2;} + { (exit 1); exit 1; }; } if diff "$ac_file" "$tmp/config.h" >/dev/null 2>&1; then - { $as_echo "$as_me:${as_lineno-$LINENO}: $ac_file is unchanged" >&5 + { $as_echo "$as_me:$LINENO: $ac_file is unchanged" >&5 $as_echo "$as_me: $ac_file is unchanged" >&6;} else rm -f "$ac_file" mv "$tmp/config.h" "$ac_file" \ - || as_fn_error "could not create $ac_file" "$LINENO" 5 + || { { $as_echo "$as_me:$LINENO: error: could not create $ac_file" >&5 +$as_echo "$as_me: error: could not create $ac_file" >&2;} + { (exit 1); exit 1; }; } fi else $as_echo "/* $configure_input */" \ && eval '$AWK -f "$tmp/defines.awk"' "$ac_file_inputs" \ - || as_fn_error "could not create -" "$LINENO" 5 + || { { $as_echo "$as_me:$LINENO: error: could not create -" >&5 +$as_echo "$as_me: error: could not create -" >&2;} + { (exit 1); exit 1; }; } fi ;; @@ -6016,12 +7894,15 @@ $as_echo "$as_me: $ac_file is unchanged" >&6;} done # for ac_tag -as_fn_exit 0 +{ (exit 0); exit 0; } _ACEOF +chmod +x $CONFIG_STATUS ac_clean_files=$ac_clean_files_save test $ac_write_fail = 0 || - as_fn_error "write failure creating $CONFIG_STATUS" "$LINENO" 5 + { { $as_echo "$as_me:$LINENO: error: write failure creating $CONFIG_STATUS" >&5 +$as_echo "$as_me: error: write failure creating $CONFIG_STATUS" >&2;} + { (exit 1); exit 1; }; } # configure is writing to config.log, and then calls config.status. @@ -6042,10 +7923,10 @@ if test "$no_create" != yes; then exec 5>>config.log # Use ||, not &&, to avoid exiting from the if with $? = 1, which # would make configure fail if this is the last instruction. - $ac_cs_success || as_fn_exit $? 
+ $ac_cs_success || { (exit 1); exit 1; } fi if test -n "$ac_unrecognized_opts" && test "$enable_option_checking" != no; then - { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: unrecognized options: $ac_unrecognized_opts" >&5 + { $as_echo "$as_me:$LINENO: WARNING: unrecognized options: $ac_unrecognized_opts" >&5 $as_echo "$as_me: WARNING: unrecognized options: $ac_unrecognized_opts" >&2;} fi diff --git a/configure.ac b/configure.ac index 0b357bc..f5223ea 100644 --- a/configure.ac +++ b/configure.ac @@ -63,6 +63,22 @@ AS_HELP_STRING([--disable-sctp],[disable sctp support])], []) AC_SUBST(SCTPLIBS) +# +# JSONLINES support +################################################# +AC_ARG_ENABLE(jsonlines, +[ +AS_HELP_STRING([--enable-jsonlines],[enable jsonlines support]) +AS_HELP_STRING([--disable-jsonlines],[disable jsonlines support])], +[ + if test $enableval != "no" ; then + CPPFLAGS="-DJSONLINESSUPPORT $CPPFLAGS" + IPFIX_JSONLINES_OBJS="ipfix_jsonlines.o ipfix_col_jsonlines.o json_out.o" + fi +], +[]) +AC_SUBST(IPFIX_JSONLINES_OBJS) + # # enable ssl support ################################################# @@ -155,7 +171,7 @@ AC_ARG_WITH(mysql, AC_MSG_WARN([cannot find mysql library]) ]) fi -] ) +]) AC_SUBST(MYSQLLIBS) AC_SUBST(IPFIX_DB_OBJ) AC_SUBST(IPFIX_DB_EXAMPLES) @@ -188,7 +204,7 @@ AC_ARG_WITH(pcap, LDFLAGS="$LDFLAGS -L$withval/lib" CPPFLAGS="$CPPFLAGS -I$withval/include" ], -[] ) +[]) SAVELIBS=$LIBS LIBS= AC_CHECK_LIB([$PCAP], [pcap_open_live],, diff --git a/examples/example_collector_db.c b/examples/example_collector_db.c index bcd7c1d..cc02acb 100644 --- a/examples/example_collector_db.c +++ b/examples/example_collector_db.c @@ -32,6 +32,7 @@ #include "ipfix_def_fokus.h" #include "ipfix_fields_fokus.h" #include "misc.h" +#include "mlog.h" /*------ defines ---------------------------------------------------------*/ @@ -152,10 +153,10 @@ int main (int argc, char *argv[]) exit(1); } - /** activate database export (jsonfile is not used) + /** activate database export */ if ( ipfix_col_init_mysqlexport( dbhost, dbuser, - dbpw, dbname, NULL ) <0 ) { + dbpw, dbname ) <0 ) { fprintf( stderr, "cannot connect to database\n" ); ipfix_cleanup(); exit(1); diff --git a/lib/Makefile.in b/lib/Makefile.in index 9975523..42a8ca3 100644 --- a/lib/Makefile.in +++ b/lib/Makefile.in @@ -42,8 +42,8 @@ INCLS = -I. -I.. 
-I../libmisc CFLAGS = $(CCOPT) $(INCLS) $(DEFS) TARGETS = ipfix_reverse_fields.h ipfix_def_fokus.h ipfix_fields_fokus.h ipfix_def_netscaler.h ipfix_fields_netscaler.h ipfix_reverse_fields_netscaler.h libipfix.a libipfix.so -SOURCES = ipfix.c ipfix_col.c ipfix_col_db.c ipfix_col_files.c ipfix_print.c json_out.c -OBJECTS = $(SOURCES:.c=.o) @IPFIX_DB_OBJ@ @IPFIX_SSL_OBJ@ +SOURCES = ipfix.c ipfix_col.c ipfix_col_jsonlines.c json_out.c ipfix_col_db.c ipfix_col_files.c ipfix_print.c +OBJECTS = $(SOURCES:.c=.o) @IPFIX_JSONLINES_OBJ@ @IPFIX_DB_OBJ@ @IPFIX_SSL_OBJ@ DHPARAMS = dh512.pem dh1024.pem CLEANFILES = $(TARGETS) *.d *.o *.so *.so.$(VERSION) DISTCLEANFILES = $(CLEANFILES) $(DHPARAMS) Makefile @@ -64,7 +64,7 @@ dhparams.c: $(DHPARAMS) $(OPENSSL) dh -noout -C < dh1024.pem >> $@ -ipfix.c: ipfix_reverse_fields.h json_out.h +ipfix.c: ipfix_reverse_fields.h ipfix_reverse_fields.h: ipfix_fields.h make-reverse-IPFIX_FIELDS_H.sed-script-file sed -f make-reverse-IPFIX_FIELDS_H.sed-script-file $< > $@ diff --git a/lib/ipfix_col.c b/lib/ipfix_col.c index 879a869..7a7aec4 100644 --- a/lib/ipfix_col.c +++ b/lib/ipfix_col.c @@ -49,6 +49,9 @@ #ifdef DBSUPPORT #include "ipfix_db.h" #endif +#ifdef JSONLINESSUPPORT +# include "ipfix_jsonlines.h" +#endif #include "ipfix_col.h" /*----- defines ----------------------------------------------------------*/ @@ -2016,14 +2019,6 @@ int ipfix_get_template_ident( ipfix_template_t *t, } -void ipfix_col_reload( void ) -{ -#ifdef DBSUPPORT - ipfix_col_db_reload(); -#endif -} - - /* * name: ipfix_col_cleanup() * parameters: none diff --git a/lib/ipfix_col.h b/lib/ipfix_col.h index 480f2cc..980c582 100644 --- a/lib/ipfix_col.h +++ b/lib/ipfix_col.h @@ -19,6 +19,9 @@ #ifdef DBSUPPORT #include #endif +#ifdef JSONLINESSUPPORT +# include +#endif #ifdef __cplusplus extern "C" { @@ -94,6 +97,7 @@ typedef struct ipfix_col_info ipfix_datarecord_t*,void*); int (*export_rawmsg)(ipfixs_node_t *source, const uint8_t* data, size_t len, void *arg); void (*export_cleanup)(void*); + void (*export_reload)(void*); void *data; } ipfix_col_info_t; @@ -111,8 +115,11 @@ typedef void* ipfix_col_t; void ipfix_col_init( void ); int ipfix_col_init_fileexport( char *datadir ); void ipfix_col_stop_fileexport( void ); -int ipfix_col_init_mysqlexport( char *host, char *user, char *pw, char *name, char *opt_jsonfile ); +int ipfix_col_init_mysqlexport( char *host, char *user, char *pw, char *name ); void ipfix_col_stop_mysqlexport( void ); +int ipfix_col_init_jsonlinesexport( char *jsonfile ); +void ipfix_col_stop_jsonlinesexport( void ); +void ipfix_col_reload_jsonlinesexport( void ); int ipfix_col_register_export( ipfix_col_info_t *colinfo ); int ipfix_col_cancel_export( ipfix_col_info_t *colinfo ); int ipfix_col_listen( int *nfds, int **fds, ipfix_proto_t protocol, @@ -141,6 +148,9 @@ const char *ipfix_col_input_get_ident( ipfix_input_t *input ); #ifdef DBSUPPORT # include #endif +#ifdef JSONLINESSUPPORT +# include +#endif #ifdef __cplusplus } diff --git a/lib/ipfix_col_db.c b/lib/ipfix_col_db.c index cb42a8e..906ba05 100644 --- a/lib/ipfix_col_db.c +++ b/lib/ipfix_col_db.c @@ -34,7 +34,6 @@ #ifdef DBSUPPORT #include "ipfix_db.h" #include "ipfix_col_db.h" -#include "json_out.h" #endif /*------ defines ---------------------------------------------------------*/ @@ -47,8 +46,6 @@ typedef struct ipfix_export_data_db { MYSQL *mysql; - char *json_filename; - FILE *json_file; } ipfixe_data_db_t; #endif @@ -151,127 +148,6 @@ int ipfix_export_trecord_db( ipfixs_node_t *s, ipfixt_node_t *t, void *arg ) return 0; 
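The ipfix_col.h hunk above is the entire public surface of the new backend: ipfix_col_init_jsonlinesexport() registers it, ipfix_col_stop_jsonlinesexport() tears it down, and ipfix_col_reload_jsonlinesexport() closes the output file so that the next record reopens it. A minimal sketch of a collector driving these calls (the SIGHUP/SIGTERM wiring, the flows.jsonl path and the elided receive loop are illustrative assumptions; only the ipfix_col_* functions come from the header, and the init call fails with ENODEV unless the library was configured with --enable-jsonlines):

    #include <signal.h>
    #include "ipfix_col.h"

    static volatile sig_atomic_t reopen = 0, done = 0;
    static void on_hup ( int sig ) { (void)sig; reopen = 1; }
    static void on_term( int sig ) { (void)sig; done   = 1; }

    int collector_main( void )
    {
        /* listener setup (ipfix_col_listen() etc.) omitted */
        if ( ipfix_col_init_jsonlinesexport( "flows.jsonl" ) < 0 )
            return -1;                        /* not built in, or registration failed */

        signal( SIGHUP,  on_hup  );
        signal( SIGTERM, on_term );

        while ( !done ) {
            /* ... receive and parse IPFIX messages here ... */
            if ( reopen ) {                   /* e.g. logrotate sent SIGHUP */
                ipfix_col_reload_jsonlinesexport();  /* file reopened on next record */
                reopen = 0;
            }
        }

        ipfix_col_stop_jsonlinesexport();
        return 0;
    }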
} -int ipfix_export_drecord_jsonfile( ipfixs_node_t *s, - ipfixt_node_t *t, - ipfix_datarecord_t *d, - void *arg ) -{ - ipfixe_data_db_t *data = (ipfixe_data_db_t*)arg; - char *func = "export_drecord_jsonfile"; - int i; - - if ( !data->json_filename ) { - return -1; - } - - /* Write data set to a file as JSON. One JSON document per line. - */ - - if (strcmp(data->json_filename, "-") == 0) { - data->json_file = stdout; - } else { - if ( data->json_filename && data->json_file == NULL ) { - data->json_file = fopen(data->json_filename, "a"); - if (data->json_file == NULL) { - mlogf( 0, "[%s] opening file '%s' for appending failed: %s\n", - func, data->json_filename, strerror(errno)); - } - } - } - - fprintf(data->json_file, "{\"ipfix_template_id\":\"%d\"", t->ipfixt->tid); - - /* TODO The first attribute should be the template number. - */ - - for ( i=0; iipfixt->nfields; i++ ) { - if ( t->ipfixt->fields[i].elem->ft->eno == 0 - && t->ipfixt->fields[i].elem->ft->ftype == 0xD2 ) { - continue; /* D2 == 210, paddingOctets */ - } - - /* The attribute names come from trusted data, not from the protocol - */ - - fprintf(data->json_file, ", \"%s\":", t->ipfixt->fields[i].elem->ft->name); - - switch (t->ipfixt->fields[i].elem->ft->coding) { - case IPFIX_CODING_UINT: - switch (d->lens[i]) { - case 1: - fprintf(data->json_file, "%u", *((uint8_t *) (d->addrs[i])) ); - break; - case 2: - fprintf(data->json_file, "%u", *((uint16_t *) (d->addrs[i])) ); - break; - case 4: - fprintf(data->json_file, "%u", *((uint32_t *) (d->addrs[i])) ); - break; - case 8: - fprintf(data->json_file, "%"PRIu64, *((uint64_t *) (d->addrs[i])) ); - break; - default: - mlogf(1, "[%s] JSON emmission of type UINT (%d bytes) is NOT IMPLEMENTED (%s).\n", func, d->lens[i], t->ipfixt->fields[i].elem->ft->name); - fprintf(data->json_file, "null"); - } - break; - case IPFIX_CODING_INT: - switch (d->lens[i]) { - case 1: - fprintf(data->json_file, "%d", *((int8_t *) (d->addrs[i])) ); - break; - case 2: - fprintf(data->json_file, "%d", *((int16_t *) (d->addrs[i])) ); - break; - case 4: - fprintf(data->json_file, "%d", *((int32_t *) (d->addrs[i])) ); - break; - case 8: - fprintf(data->json_file, "%"PRId64, *((uint64_t *) (d->addrs[i])) ); - break; - default: - mlogf(1, "[%s] JSON emmission of type INT (%d bytes) is NOT IMPLEMENTED (%s).\n", func, d->lens[i], t->ipfixt->fields[i].elem->ft->name); - fprintf(data->json_file, "null"); - } - break; - case IPFIX_CODING_FLOAT: - mlogf(1, "[%s] JSON emmission of type FLOAT not complete yet (%s).\n", func, t->ipfixt->fields[i].elem->ft->name); - fprintf(data->json_file, "null"); - break; - case IPFIX_CODING_IPADDR: - { - char addrbuf[INET6_ADDRSTRLEN]; - - ipfix_snprint_ipaddr(addrbuf, INET6_ADDRSTRLEN, d->addrs[i], d->lens[i]); - - fprintf(data->json_file, "\"%s\"", addrbuf); - } - break; - case IPFIX_CODING_NTP: - json_render_NTP_timestamp_to_FILE(data->json_file, d->addrs[i], d->lens[i]); - break; - case IPFIX_CODING_STRING: - // don't forget JSON is meant to be UTF-8; IPFIX/Netscaler is ....? 
- json_render_string_to_FILE(data->json_file, (const char *) d->addrs[i], d->lens[i]); - break; - case IPFIX_CODING_BYTES: - json_render_bytes_as_hexpairs_to_FILE(data->json_file, d->addrs[i], d->lens[i]); - break; - default: - mlogf(1, "[%s] JSON emmission of type %d not currently supported (%s).\n", - func, t->ipfixt->fields[i].elem->ft->coding, t->ipfixt->fields[i].elem->ft->name); - fprintf(data->json_file, "null"); - } - } - - fprintf(data->json_file, "}\n"); - - if (data->json_file) { - fflush(data->json_file); - } - return 0; -} - int ipfix_export_drecord_db( ipfixs_node_t *s, ipfixt_node_t *t, ipfix_datarecord_t *d, @@ -359,7 +235,6 @@ int ipfix_export_drecord_db( ipfixs_node_t *s, int ipfix_export_init_db( char *dbhost, char *dbuser, char *dbpw, char *dbname, - char *opt_jsonfile, void **arg ) { ipfixe_data_db_t *data; @@ -372,34 +247,15 @@ int ipfix_export_init_db( char *dbhost, char *dbuser, return -1; } - data->json_filename = opt_jsonfile; - data->json_file = NULL; - *arg = (void**)data; return 0; } -void ipfix_col_db_reload( void ) -{ - ipfixe_data_db_t *data = g_colinfo->data; - - if (data->json_file != NULL && data->json_file != stdout) { - fclose(data->json_file); - data->json_file = NULL; - /* It will get reopened when it next receives data */ - } -} - void ipfix_export_cleanup_db( void *arg ) { ipfixe_data_db_t *data = (ipfixe_data_db_t*)arg; if ( data ) { - if ( data->json_file ) { - fclose(data->json_file); - /* possible that the above could fail, but not sure what we would do */ - data->json_file = NULL; - } if ( data->mysql ) ipfix_db_close( &(data->mysql) ); free(data); @@ -411,13 +267,12 @@ void ipfix_export_cleanup_db( void *arg ) /*----- export funcs -----------------------------------------------------*/ int ipfix_col_init_mysqlexport( char *dbhost, char *dbuser, - char *dbpw, char *dbname, - char *opt_jsonfile ) + char *dbpw, char *dbname ) { #ifdef DBSUPPORT void *data; - if ( ipfix_export_init_db( dbhost, dbuser, dbpw, dbname, opt_jsonfile, &data ) <0 ) { + if ( ipfix_export_init_db( dbhost, dbuser, dbpw, dbname, &data ) <0 ) { return -1; } @@ -430,9 +285,6 @@ int ipfix_col_init_mysqlexport( char *dbhost, char *dbuser, g_colinfo->export_newmsg = ipfix_export_newmsg_db; g_colinfo->export_trecord = ipfix_export_trecord_db; g_colinfo->export_drecord = ipfix_export_drecord_db; - if (opt_jsonfile != NULL) { - g_colinfo->export_drecord = ipfix_export_drecord_jsonfile; - } g_colinfo->export_cleanup = ipfix_export_cleanup_db; g_colinfo->data = data; diff --git a/lib/ipfix_col_db.h b/lib/ipfix_col_db.h index fb8b6e8..40e4a39 100644 --- a/lib/ipfix_col_db.h +++ b/lib/ipfix_col_db.h @@ -21,14 +21,12 @@ int ipfix_export_newmsg_db( ipfixs_node_t *s, ipfix_hdr_t *hdr, void *arg ); int ipfix_export_trecord_db( ipfixs_node_t *s, ipfixt_node_t *t, void *arg ); int ipfix_export_drecord_db( ipfixs_node_t *s, ipfixt_node_t *t, ipfix_datarecord_t *d, void *arg ); -int ipfix_export_drecords_jsonfile( ipfixs_node_t *s, ipfixt_node_t *t, +int ipfix_export_drecord_jsonfile( ipfixs_node_t *s, ipfixt_node_t *t, ipfix_datarecord_t *d, void *arg ); void ipfix_export_cleanup_db( void *arg ); int ipfix_export_init_db( char *dbhost, char *dbuser, char *dbpw, char *dbname, - char *opt_jsonfile, void **data ); -void ipfix_col_db_reload( void ); #ifdef __cplusplus } diff --git a/lib/ipfix_col_jsonlines.c b/lib/ipfix_col_jsonlines.c new file mode 100644 index 0000000..ad9bab9 --- /dev/null +++ b/lib/ipfix_col_jsonlines.c @@ -0,0 +1,262 @@ +/* +$$LIC$$ + */ +/* +** ipfix_col_jsonfile.c - IPFIX 
collector functions for jsonlines +** +** Copyright Cameron Kerr, Fraunhofer FOKUS +** +*/ +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "mlog.h" +#include "misc.h" +#include "ipfix.h" +#include "ipfix_col.h" +#include "ipfix_col_jsonlines.h" +#include "json_out.h" + +/*------ defines ---------------------------------------------------------*/ + +/*------ structs ---------------------------------------------------------*/ + +typedef struct ipfix_export_data_jsonlines +{ + char *json_filename; + FILE *json_file; +} ipfixe_data_jsonlines_t; + +/*------ globals ---------------------------------------------------------*/ + +/*----- revision id ------------------------------------------------------*/ + +/*----- globals ----------------------------------------------------------*/ + +/*----- static funcs -----------------------------------------------------*/ + +int ipfix_export_drecord_jsonlines( ipfixs_node_t *s, + ipfixt_node_t *t, + ipfix_datarecord_t *d, + void *arg ) +{ +#ifdef JSONLINESSUPPORT + ipfixe_data_jsonlines_t *data = (ipfixe_data_jsonlines_t*)arg; + char *func = "export_drecord_jsonlines"; + int i; + + if ( !data->json_filename ) { + return -1; + } + + /* Write data set to a file as JSON. One JSON document per line. + */ + + if (strcmp(data->json_filename, "-") == 0) { + data->json_file = stdout; + } else { + if ( data->json_filename && data->json_file == NULL ) { + data->json_file = fopen(data->json_filename, "a"); + if (data->json_file == NULL) { + mlogf( 0, "[%s] opening file '%s' for appending failed: %s\n", + func, data->json_filename, strerror(errno)); + } + } + } + + fprintf(data->json_file, "{\"ipfix_template_id\":\"%d\"", t->ipfixt->tid); + + /* TODO The first attribute should be the template number. 
+ */ + + for ( i=0; iipfixt->nfields; i++ ) { + if ( t->ipfixt->fields[i].elem->ft->eno == 0 + && t->ipfixt->fields[i].elem->ft->ftype == 0xD2 ) { + continue; /* D2 == 210, paddingOctets */ + } + + /* The attribute names come from trusted data, not from the protocol + */ + + fprintf(data->json_file, ", \"%s\":", t->ipfixt->fields[i].elem->ft->name); + + switch (t->ipfixt->fields[i].elem->ft->coding) { + case IPFIX_CODING_UINT: + switch (d->lens[i]) { + case 1: + fprintf(data->json_file, "%u", *((uint8_t *) (d->addrs[i])) ); + break; + case 2: + fprintf(data->json_file, "%u", *((uint16_t *) (d->addrs[i])) ); + break; + case 4: + fprintf(data->json_file, "%u", *((uint32_t *) (d->addrs[i])) ); + break; + case 8: + fprintf(data->json_file, "%"PRIu64, *((uint64_t *) (d->addrs[i])) ); + break; + default: + mlogf(1, "[%s] JSON emmission of type UINT (%d bytes) is NOT IMPLEMENTED (%s).\n", func, d->lens[i], t->ipfixt->fields[i].elem->ft->name); + fprintf(data->json_file, "null"); + } + break; + case IPFIX_CODING_INT: + switch (d->lens[i]) { + case 1: + fprintf(data->json_file, "%d", *((int8_t *) (d->addrs[i])) ); + break; + case 2: + fprintf(data->json_file, "%d", *((int16_t *) (d->addrs[i])) ); + break; + case 4: + fprintf(data->json_file, "%d", *((int32_t *) (d->addrs[i])) ); + break; + case 8: + fprintf(data->json_file, "%"PRId64, *((uint64_t *) (d->addrs[i])) ); + break; + default: + mlogf(1, "[%s] JSON emmission of type INT (%d bytes) is NOT IMPLEMENTED (%s).\n", func, d->lens[i], t->ipfixt->fields[i].elem->ft->name); + fprintf(data->json_file, "null"); + } + break; + case IPFIX_CODING_FLOAT: + mlogf(1, "[%s] JSON emmission of type FLOAT not complete yet (%s).\n", func, t->ipfixt->fields[i].elem->ft->name); + fprintf(data->json_file, "null"); + break; + case IPFIX_CODING_IPADDR: + { + char addrbuf[INET6_ADDRSTRLEN]; + + ipfix_snprint_ipaddr(addrbuf, INET6_ADDRSTRLEN, d->addrs[i], d->lens[i]); + + fprintf(data->json_file, "\"%s\"", addrbuf); + } + break; + case IPFIX_CODING_NTP: + json_render_NTP_timestamp_to_FILE(data->json_file, d->addrs[i], d->lens[i]); + break; + case IPFIX_CODING_STRING: + // don't forget JSON is meant to be UTF-8; IPFIX/Netscaler is ....? 
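    /*
     * For orientation, a hypothetical record (field names and values are
     * invented for illustration): with a template carrying sourceIPv4Address
     * and octetDeltaCount, each call to this function appends one line such as
     *
     *   {"ipfix_template_id":"256", "sourceIPv4Address":"192.0.2.1", "octetDeltaCount":1500}
     *
     * Integers are printed bare, addresses and the template id are quoted
     * strings, and codings without a renderer fall back to null.
     */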
+ json_render_string_to_FILE(data->json_file, (const char *) d->addrs[i], d->lens[i]); + break; + case IPFIX_CODING_BYTES: + json_render_bytes_as_hexpairs_to_FILE(data->json_file, d->addrs[i], d->lens[i]); + break; + default: + mlogf(1, "[%s] JSON emmission of type %d not currently supported (%s).\n", + func, t->ipfixt->fields[i].elem->ft->coding, t->ipfixt->fields[i].elem->ft->name); + fprintf(data->json_file, "null"); + } + } + + fprintf(data->json_file, "}\n"); + + if (data->json_file) { + /* TODO: Need to be able to have a more performant flushing policy */ + fflush(data->json_file); + } +#endif + return 0; +} + +int ipfix_export_init_jsonlines( char *jsonfile, void **arg ) +{ +#ifdef JSONLINESSUPPORT + ipfixe_data_jsonlines_t *data; + + if ( (data=calloc( 1, sizeof(ipfixe_data_jsonlines_t))) ==NULL) + return -1; + + data->json_filename = jsonfile; + data->json_file = NULL; + + *arg = (void**)data; +#endif + return 0; +} + +void ipfix_export_reload_jsonlines( void *arg ) +{ +#ifdef JSONLINESSUPPORT + ipfixe_data_jsonlines_t *data = (ipfixe_data_jsonlines_t*)arg; + + if (data->json_file != NULL && data->json_file != stdout) { + fclose(data->json_file); + data->json_file = NULL; + /* It will get reopened when it next receives data */ + } +#endif +} + +void ipfix_export_cleanup_jsonlines( void *arg ) +{ +#ifdef JSONLINESSUPPORT + ipfixe_data_jsonlines_t *data = arg; + + if ( data ) { + if (data->json_file != NULL && data->json_file != stdout) { + fclose(data->json_file); + data->json_file = NULL; + } + + free(data); + } +#endif +} + + +/*----- export funcs -----------------------------------------------------*/ + +int ipfix_col_init_jsonlinesexport( char *jsonfile ) +{ +#ifdef JSONLINESSUPPORT + void *data; + + if ( ipfix_export_init_jsonlines(jsonfile, &data) <0 ) { + return -1; + } + + if ( (g_colinfo=calloc( 1, sizeof(ipfix_col_info_t))) ==NULL) { + ipfix_export_cleanup_jsonlines( data ); + return -1; + } + + g_colinfo->export_drecord = ipfix_export_drecord_jsonlines; + g_colinfo->export_cleanup = ipfix_export_cleanup_jsonlines; + g_colinfo->export_reload = ipfix_export_reload_jsonlines; + g_colinfo->data = data; + + return ipfix_col_register_export( g_colinfo ); +#endif + errno = ENODEV; + return -1; +} + +void ipfix_col_stop_jsonlinesexport(void) +{ +#ifdef JSONLINESSUPPORT + /* currently the same as a reload */ + ipfix_export_reload_jsonlines(g_colinfo->data); +#endif +} + +void ipfix_col_reload_jsonlinesexport(void) +{ +#ifdef JSONLINESSUPPORT + ipfix_export_reload_jsonlines(g_colinfo->data); +#endif +} + diff --git a/lib/ipfix_col_jsonlines.h b/lib/ipfix_col_jsonlines.h new file mode 100644 index 0000000..792fc9f --- /dev/null +++ b/lib/ipfix_col_jsonlines.h @@ -0,0 +1,28 @@ +/* + * ipfix_col_jsonlines.h - Private API for the JSONLINES mode + * + * Copyright Cameron Kerr + */ + +#ifndef IPFIX_COL_JSONLINES_H +#define IPFIX_COL_JSONLINES_H + +#ifdef __cplusplus +extern "C" { +#endif + +int ipfix_export_drecord_jsonlines( + ipfixs_node_t *s, /* < Parsed message */ + ipfixt_node_t *t, /* < Relevant template information for this data record */ + ipfix_datarecord_t *d, /* < Data record to be emitted */ + void *arg /* <> jsonlines state */ + ); + +void ipfix_export_reload_jsonlines(void *arg); +void ipfix_export_cleanup_jsonlinesexport( void *arg ); +int ipfix_export_init_jsonlinesexport(char *jsonfile, void **data ); + +#ifdef __cplusplus +} +#endif +#endif diff --git a/lib/ipfix_jsonlines.c b/lib/ipfix_jsonlines.c new file mode 100644 index 0000000..c3948ac --- /dev/null +++ 
b/lib/ipfix_jsonlines.c @@ -0,0 +1,17 @@ +/* + * ipfix_jsonlines.c - database access functions + * + * Copyright Cameron Kerr + */ +#include +#include +#include +#include +#include +#include +#include + +#include "mlog.h" +#include "misc.h" +#include "ipfix_jsonlines.h" + diff --git a/lib/ipfix_jsonlines.h b/lib/ipfix_jsonlines.h new file mode 100644 index 0000000..0ba0458 --- /dev/null +++ b/lib/ipfix_jsonlines.h @@ -0,0 +1,13 @@ +#ifndef _IPFIX_JSONLINES_H +#define _IPFIX_JSONLINES_H + +#include "ipfix.h" + +#ifdef __cplusplus +extern "C" { +#endif + +#ifdef __cplusplus +} +#endif +#endif From d38f0e7d2c0e8092753bd54d15727f7b7e85e884 Mon Sep 17 00:00:00 2001 From: Cameron Kerr Date: Tue, 3 Mar 2015 15:38:54 +1300 Subject: [PATCH 27/48] Resolve #15 Log IP address of exporting device --- lib/ipfix_col.c | 9 +++++---- lib/ipfix_col.h | 2 +- lib/ipfix_col_db.c | 5 ++++- lib/ipfix_col_db.h | 3 ++- lib/ipfix_col_files.c | 5 ++++- lib/ipfix_col_jsonlines.c | 25 +++++++++++++++++++++++-- lib/ipfix_col_jsonlines.h | 3 ++- lib/ipfix_print.c | 5 ++++- 8 files changed, 45 insertions(+), 12 deletions(-) diff --git a/lib/ipfix_col.c b/lib/ipfix_col.c index 7a7aec4..2a01e0f 100644 --- a/lib/ipfix_col.c +++ b/lib/ipfix_col.c @@ -821,7 +821,8 @@ int ipfix_export_trecord( ipfixs_node_t *s, */ int ipfix_export_datarecord( ipfixs_node_t *s, ipfixt_node_t *t, - ipfix_datarecord_t *data ) + ipfix_datarecord_t *data, + ipfix_input_t *source ) { ipfixe_node_t *e; @@ -829,7 +830,7 @@ int ipfix_export_datarecord( ipfixs_node_t *s, */ for ( e=g_exporter; e!=NULL; e=e->next ) { if ( e->elem->export_drecord ) - (void) e->elem->export_drecord( s, t, data, e->elem->data ); + (void) e->elem->export_drecord( s, t, data, e->elem->data, source); } return 0; @@ -1078,7 +1079,7 @@ int ipfix_parse_msg( ipfix_input_t *input, goto errend; } - (void) ipfix_export_datarecord( s, t, &data ); + (void) ipfix_export_datarecord( s, t, &data, input ); bytesleft -= bytes; offset += bytes; @@ -1245,7 +1246,7 @@ int ipfix_parse_raw_msg(ipfixs_node_t *src, ipfixe_node_t *local_exporter, cons for ( e=local_exporter; e!=NULL; e=e->next ) { if ( e->elem->export_drecord ) - (void) e->elem->export_drecord( src, t, &data, e->elem->data ); + (void) e->elem->export_drecord( src, t, &data, e->elem->data, NULL ); } //(void) ipfix_export_datarecord( src, t, &data ); diff --git a/lib/ipfix_col.h b/lib/ipfix_col.h index 980c582..9d1d1f0 100644 --- a/lib/ipfix_col.h +++ b/lib/ipfix_col.h @@ -94,7 +94,7 @@ typedef struct ipfix_col_info int (*export_trecord)(ipfixs_node_t*,ipfixt_node_t*,void*); int (*export_dset)(ipfixt_node_t*,const uint8_t*,size_t,void*); int (*export_drecord)(ipfixs_node_t*,ipfixt_node_t*, - ipfix_datarecord_t*,void*); + ipfix_datarecord_t*,void*,ipfix_input_t*); int (*export_rawmsg)(ipfixs_node_t *source, const uint8_t* data, size_t len, void *arg); void (*export_cleanup)(void*); void (*export_reload)(void*); diff --git a/lib/ipfix_col_db.c b/lib/ipfix_col_db.c index 906ba05..0b56e25 100644 --- a/lib/ipfix_col_db.c +++ b/lib/ipfix_col_db.c @@ -151,12 +151,15 @@ int ipfix_export_trecord_db( ipfixs_node_t *s, ipfixt_node_t *t, void *arg ) int ipfix_export_drecord_db( ipfixs_node_t *s, ipfixt_node_t *t, ipfix_datarecord_t *d, - void *arg ) + void *arg, + ipfix_input_t *source ) { ipfixe_data_db_t *data = (ipfixe_data_db_t*)arg; char *func = "export_drecord_db"; int i, nbytes, binary_f=0; + (void) source; + if ( !data->mysql ) return -1; diff --git a/lib/ipfix_col_db.h b/lib/ipfix_col_db.h index 40e4a39..8738953 100644 --- 
a/lib/ipfix_col_db.h +++ b/lib/ipfix_col_db.h @@ -20,7 +20,8 @@ int ipfix_export_newsrc_db( ipfixs_node_t *s, void *arg ) ; int ipfix_export_newmsg_db( ipfixs_node_t *s, ipfix_hdr_t *hdr, void *arg ); int ipfix_export_trecord_db( ipfixs_node_t *s, ipfixt_node_t *t, void *arg ); int ipfix_export_drecord_db( ipfixs_node_t *s, ipfixt_node_t *t, - ipfix_datarecord_t *d, void *arg ); + ipfix_datarecord_t *d, void *arg, + ipfix_input_t *source ); int ipfix_export_drecord_jsonfile( ipfixs_node_t *s, ipfixt_node_t *t, ipfix_datarecord_t *d, void *arg ); void ipfix_export_cleanup_db( void *arg ); diff --git a/lib/ipfix_col_files.c b/lib/ipfix_col_files.c index 732b940..9036e88 100644 --- a/lib/ipfix_col_files.c +++ b/lib/ipfix_col_files.c @@ -142,11 +142,14 @@ static int export_trecord_file( ipfixs_node_t *s, ipfixt_node_t *t, void *arg ) static int export_drecord_file( ipfixs_node_t *s, ipfixt_node_t *t, ipfix_datarecord_t *data, - void *arg ) + void *arg, + ipfix_template_t *source ) { char tmpbuf[2000]; int i, nbytes; + (void) source; + if ( s->fp ) { /** write record into file */ diff --git a/lib/ipfix_col_jsonlines.c b/lib/ipfix_col_jsonlines.c index ad9bab9..81bd2ba 100644 --- a/lib/ipfix_col_jsonlines.c +++ b/lib/ipfix_col_jsonlines.c @@ -51,12 +51,14 @@ typedef struct ipfix_export_data_jsonlines int ipfix_export_drecord_jsonlines( ipfixs_node_t *s, ipfixt_node_t *t, ipfix_datarecord_t *d, - void *arg ) + void *arg, + ipfix_input_t *source ) { #ifdef JSONLINESSUPPORT ipfixe_data_jsonlines_t *data = (ipfixe_data_jsonlines_t*)arg; char *func = "export_drecord_jsonlines"; int i; + char exporter_ip[INET6_ADDRSTRLEN]; if ( !data->json_filename ) { return -1; @@ -77,7 +79,26 @@ int ipfix_export_drecord_jsonlines( ipfixs_node_t *s, } } - fprintf(data->json_file, "{\"ipfix_template_id\":\"%d\"", t->ipfixt->tid); + if (source != NULL && source->type == IPFIX_INPUT_IPCON + && source->u.ipcon.addr->sa_family == AF_INET) + { + inet_ntop( AF_INET, & ((struct sockaddr_in *)(source->u.ipcon.addr))->sin_addr.s_addr, + exporter_ip, INET6_ADDRSTRLEN); + fprintf(data->json_file, "{\"ipfix_exporter_ip\":\"%s\"", exporter_ip); + } + else if (source != NULL && source->type == IPFIX_INPUT_IPCON + && source->u.ipcon.addr->sa_family == AF_INET6) + { + inet_ntop( AF_INET6, & ((struct sockaddr_in6 *)(source->u.ipcon.addr))->sin6_addr, + exporter_ip, INET6_ADDRSTRLEN); + fprintf(data->json_file, "{\"ipfix_exporter_ip\":\"%s\"", exporter_ip); + } + else + { + fprintf(data->json_file, "{\"ipfix_exporter_ip\":null"); + } + + fprintf(data->json_file, ", \"ipfix_template_id\":\"%d\"", t->ipfixt->tid); /* TODO The first attribute should be the template number. */ diff --git a/lib/ipfix_col_jsonlines.h b/lib/ipfix_col_jsonlines.h index 792fc9f..ac516b4 100644 --- a/lib/ipfix_col_jsonlines.h +++ b/lib/ipfix_col_jsonlines.h @@ -15,7 +15,8 @@ int ipfix_export_drecord_jsonlines( ipfixs_node_t *s, /* < Parsed message */ ipfixt_node_t *t, /* < Relevant template information for this data record */ ipfix_datarecord_t *d, /* < Data record to be emitted */ - void *arg /* <> jsonlines state */ + void *arg, /* <> jsonlines state */ + ipfix_input_t *source /* < Potential IP address information. 
May be NULL */ ); void ipfix_export_reload_jsonlines(void *arg); diff --git a/lib/ipfix_print.c b/lib/ipfix_print.c index b9eb780..3eb51be 100644 --- a/lib/ipfix_print.c +++ b/lib/ipfix_print.c @@ -126,12 +126,15 @@ static int ipfix_print_trecord( ipfixs_node_t *s, ipfixt_node_t *t, void *arg ) static int ipfix_print_drecord( ipfixs_node_t *s, ipfixt_node_t *t, ipfix_datarecord_t *data, - void *arg ) + void *arg, + ipfix_input_t *source ) { char tmpbuf[2000]; int i; FILE *fp = (FILE*)arg; + (void) source; + if ( !t || !s || !data ) return -1; From 6ad746fb9a69365bcf3be66f41c399c37bc8f51f Mon Sep 17 00:00:00 2001 From: Cameron Kerr Date: Wed, 4 Mar 2015 11:23:46 +1300 Subject: [PATCH 28/48] Resolve #22 Exit JSON for 'no template for XXX, skip data set' --- lib/ipfix_col.c | 7 +++ lib/ipfix_col.h | 1 + lib/ipfix_col_jsonlines.c | 112 ++++++++++++++++++++++++++++++++------ lib/ipfix_col_jsonlines.h | 1 + 4 files changed, 105 insertions(+), 16 deletions(-) diff --git a/lib/ipfix_col.c b/lib/ipfix_col.c index 2a01e0f..641e383 100644 --- a/lib/ipfix_col.c +++ b/lib/ipfix_col.c @@ -1059,6 +1059,13 @@ int ipfix_parse_msg( ipfix_input_t *input, if ( (t=_get_ipfixt( s->templates, setid )) ==NULL ) { mlogf( 0, "[%s] no template for %d, skip data set\n", func, setid ); + + for ( e=g_exporter; e!=NULL; e=e->next ) { + if ( e->elem->export_notify_no_template_for_set ) + (void) e->elem->export_notify_no_template_for_set( + setid, s, buf+nread, setlen, e->elem->data ); + } + nread += setlen; err_flag = 1; } diff --git a/lib/ipfix_col.h b/lib/ipfix_col.h index 9d1d1f0..918d2c4 100644 --- a/lib/ipfix_col.h +++ b/lib/ipfix_col.h @@ -96,6 +96,7 @@ typedef struct ipfix_col_info int (*export_drecord)(ipfixs_node_t*,ipfixt_node_t*, ipfix_datarecord_t*,void*,ipfix_input_t*); int (*export_rawmsg)(ipfixs_node_t *source, const uint8_t* data, size_t len, void *arg); + int (*export_notify_no_template_for_set)(int,ipfixs_node_t*,const uint8_t*,size_t,void*); void (*export_cleanup)(void*); void (*export_reload)(void*); void *data; diff --git a/lib/ipfix_col_jsonlines.c b/lib/ipfix_col_jsonlines.c index 81bd2ba..183b4d8 100644 --- a/lib/ipfix_col_jsonlines.c +++ b/lib/ipfix_col_jsonlines.c @@ -48,6 +48,31 @@ typedef struct ipfix_export_data_jsonlines /*----- static funcs -----------------------------------------------------*/ +int _jsonlines_ensure_open(const char *func, ipfixe_data_jsonlines_t *data) +{ + if (strcmp(data->json_filename, "-") == 0) { + data->json_file = stdout; + } else { + if ( data->json_filename && data->json_file == NULL ) { + data->json_file = fopen(data->json_filename, "a"); + if (data->json_file == NULL) { + mlogf( 0, "[%s] opening file '%s' for appending failed: %s\n", + func, data->json_filename, strerror(errno)); + return -1; + } + } + } + return 0; +} + +static void _jsonlines_after_message(ipfixe_data_jsonlines_t *data) +{ + if (data->json_file) { + /* TODO: Need to be able to have a more performant flushing policy */ + fflush(data->json_file); + } +} + int ipfix_export_drecord_jsonlines( ipfixs_node_t *s, ipfixt_node_t *t, ipfix_datarecord_t *d, @@ -67,16 +92,9 @@ int ipfix_export_drecord_jsonlines( ipfixs_node_t *s, /* Write data set to a file as JSON. One JSON document per line. 
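Patches 27 and 28 together widen the exporter interface: export_drecord() now receives the ipfix_input_t of the sending device (which may be NULL, e.g. for records replayed from a raw message), and export_notify_no_template_for_set() lets a backend see data sets that arrive before their template. A sketch of how a third-party backend might hook into the widened ipfix_col_info_t (the my_* names and bodies are illustrative; only the struct members and library calls come from ipfix_col.h):

    #include <stdio.h>
    #include <stdint.h>
    #include "ipfix_col.h"

    static int my_drecord( ipfixs_node_t *s, ipfixt_node_t *t,
                           ipfix_datarecord_t *d, void *arg,
                           ipfix_input_t *source )
    {
        (void)s; (void)d; (void)arg;
        fprintf( stderr, "record for template %d from %s\n", t->ipfixt->tid,
                 source ? ipfix_col_input_get_ident( source ) : "(unknown)" );
        return 0;
    }

    static int my_no_template( int tid, ipfixs_node_t *s,
                               const uint8_t *set, size_t setlen, void *arg )
    {
        (void)s; (void)set; (void)arg;
        fprintf( stderr, "no template %d yet, %zu byte set skipped\n", tid, setlen );
        return 0;
    }

    /* unset callbacks stay NULL; the collector checks before calling them */
    static ipfix_col_info_t my_backend;

    int register_my_backend( void )
    {
        my_backend.export_drecord = my_drecord;
        my_backend.export_notify_no_template_for_set = my_no_template;
        return ipfix_col_register_export( &my_backend );
    }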
*/ - if (strcmp(data->json_filename, "-") == 0) { - data->json_file = stdout; - } else { - if ( data->json_filename && data->json_file == NULL ) { - data->json_file = fopen(data->json_filename, "a"); - if (data->json_file == NULL) { - mlogf( 0, "[%s] opening file '%s' for appending failed: %s\n", - func, data->json_filename, strerror(errno)); - } - } + if (_jsonlines_ensure_open(func, data) < 0) + { + return -1; } if (source != NULL && source->type == IPFIX_INPUT_IPCON @@ -185,10 +203,69 @@ int ipfix_export_drecord_jsonlines( ipfixs_node_t *s, fprintf(data->json_file, "}\n"); - if (data->json_file) { - /* TODO: Need to be able to have a more performant flushing policy */ - fflush(data->json_file); + _jsonlines_after_message(data); +#endif + return 0; +} + +int ipfix_export_newsource_jsonlines( ipfixs_node_t *s, void *arg ) +{ +#ifdef JSONLINESSUPPORT + ipfixe_data_jsonlines_t *data = (ipfixe_data_jsonlines_t*)arg; + + if (_jsonlines_ensure_open(__FUNCTION__, data) < 0) + { + return -1; } + + /* "ipfix_collector_notify":"newsource" */ + + fprintf(data->json_file, + "{ \"ipfix_collector_notice\":\"newsource\"" + ", \"summary\":\"new source seen %s/%lu\"" + ", \"ipfix_exporter_ip\":\"%s\"" + ", \"observationDomainId\":%lu" + "}\n", + ipfix_col_input_get_ident( s->input ), (u_long)s->odid, + ipfix_col_input_get_ident( s->input ), (u_long)s->odid ); + + _jsonlines_after_message(data); + +#endif + return 0; +} + +int ipfix_export_notify_no_template_for_set( + int template_id, + ipfixs_node_t * source, + const uint8_t * set_start, + size_t set_len, + void * arg) +{ +#ifdef JSONLINESSUPPORT + ipfixe_data_jsonlines_t *data = (ipfixe_data_jsonlines_t*)arg; + + if (_jsonlines_ensure_open(__FUNCTION__, data) < 0) + { + return -1; + } + + fprintf(data->json_file, + "{ \"ipfix_collector_notice\":\"no_template_for_set\"" + ", \"ipfix_template_id\":\"%d\"" + ", \"ipfix_exporter_ip\":\"%s\"" + ", \"summary\":\"no template for %d, skip data set\"" + ", \"set_bytes\":\"", + template_id, + ipfix_col_input_get_ident( source->input ), + template_id); + + json_render_bytes_as_hexpairs_to_FILE(data->json_file, set_start, set_len); + + fprintf(data->json_file, + "\"}\n"); + + _jsonlines_after_message(data); #endif return 0; } @@ -255,9 +332,12 @@ int ipfix_col_init_jsonlinesexport( char *jsonfile ) return -1; } - g_colinfo->export_drecord = ipfix_export_drecord_jsonlines; - g_colinfo->export_cleanup = ipfix_export_cleanup_jsonlines; - g_colinfo->export_reload = ipfix_export_reload_jsonlines; + g_colinfo->export_drecord = ipfix_export_drecord_jsonlines; + g_colinfo->export_cleanup = ipfix_export_cleanup_jsonlines; + g_colinfo->export_reload = ipfix_export_reload_jsonlines; + g_colinfo->export_newsource = ipfix_export_newsource_jsonlines; + g_colinfo->export_notify_no_template_for_set = ipfix_export_notify_no_template_for_set; + g_colinfo->data = data; return ipfix_col_register_export( g_colinfo ); diff --git a/lib/ipfix_col_jsonlines.h b/lib/ipfix_col_jsonlines.h index ac516b4..ff9b7ae 100644 --- a/lib/ipfix_col_jsonlines.h +++ b/lib/ipfix_col_jsonlines.h @@ -18,6 +18,7 @@ int ipfix_export_drecord_jsonlines( void *arg, /* <> jsonlines state */ ipfix_input_t *source /* < Potential IP address information. 
May be NULL */ ); +int ipfix_export_newsrc_jsonlines( ipfixs_node_t *s, void *arg ); void ipfix_export_reload_jsonlines(void *arg); void ipfix_export_cleanup_jsonlinesexport( void *arg ); From f9f5433e84080a8dc9b87f1cc478c7b8784a118f Mon Sep 17 00:00:00 2001 From: Cameron Kerr Date: Wed, 4 Mar 2015 13:53:41 +1300 Subject: [PATCH 29/48] Fix invalid-JSON output for bytes for 'no template for set' messages --- lib/ipfix_col_jsonlines.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/lib/ipfix_col_jsonlines.c b/lib/ipfix_col_jsonlines.c index 183b4d8..91f4852 100644 --- a/lib/ipfix_col_jsonlines.c +++ b/lib/ipfix_col_jsonlines.c @@ -255,7 +255,7 @@ int ipfix_export_notify_no_template_for_set( ", \"ipfix_template_id\":\"%d\"" ", \"ipfix_exporter_ip\":\"%s\"" ", \"summary\":\"no template for %d, skip data set\"" - ", \"set_bytes\":\"", + ", \"set_bytes\":", template_id, ipfix_col_input_get_ident( source->input ), template_id); @@ -263,7 +263,7 @@ int ipfix_export_notify_no_template_for_set( json_render_bytes_as_hexpairs_to_FILE(data->json_file, set_start, set_len); fprintf(data->json_file, - "\"}\n"); + "}\n"); _jsonlines_after_message(data); #endif From b12d85e31ee1448c04adc8ce510f32933295bb93 Mon Sep 17 00:00:00 2001 From: Cameron Kerr Date: Thu, 5 Mar 2015 15:04:13 +1300 Subject: [PATCH 30/48] Resolve #25 Omit JSON attributes if value is empty empty in this case means "" for strings, or a 0-byte array for bytes. --- lib/ipfix_col_jsonlines.c | 28 ++++++++++++++++++++++------ 1 file changed, 22 insertions(+), 6 deletions(-) diff --git a/lib/ipfix_col_jsonlines.c b/lib/ipfix_col_jsonlines.c index 91f4852..ca43be9 100644 --- a/lib/ipfix_col_jsonlines.c +++ b/lib/ipfix_col_jsonlines.c @@ -122,18 +122,21 @@ int ipfix_export_drecord_jsonlines( ipfixs_node_t *s, */ for ( i=0; iipfixt->nfields; i++ ) { + if ( t->ipfixt->fields[i].elem->ft->eno == 0 && t->ipfixt->fields[i].elem->ft->ftype == 0xD2 ) { continue; /* D2 == 210, paddingOctets */ } - /* The attribute names come from trusted data, not from the protocol + /* + * The attribute names come from trusted data, not from the protocol. + * We print out the header in each case, as we might wish to ignore the + * attribute based on the value (eg. 
a string "") */ - fprintf(data->json_file, ", \"%s\":", t->ipfixt->fields[i].elem->ft->name); - switch (t->ipfixt->fields[i].elem->ft->coding) { case IPFIX_CODING_UINT: + fprintf(data->json_file, ", \"%s\":", t->ipfixt->fields[i].elem->ft->name); switch (d->lens[i]) { case 1: fprintf(data->json_file, "%u", *((uint8_t *) (d->addrs[i])) ); @@ -153,6 +156,7 @@ int ipfix_export_drecord_jsonlines( ipfixs_node_t *s, } break; case IPFIX_CODING_INT: + fprintf(data->json_file, ", \"%s\":", t->ipfixt->fields[i].elem->ft->name); switch (d->lens[i]) { case 1: fprintf(data->json_file, "%d", *((int8_t *) (d->addrs[i])) ); @@ -172,10 +176,12 @@ int ipfix_export_drecord_jsonlines( ipfixs_node_t *s, } break; case IPFIX_CODING_FLOAT: + fprintf(data->json_file, ", \"%s\":", t->ipfixt->fields[i].elem->ft->name); mlogf(1, "[%s] JSON emmission of type FLOAT not complete yet (%s).\n", func, t->ipfixt->fields[i].elem->ft->name); fprintf(data->json_file, "null"); break; case IPFIX_CODING_IPADDR: + fprintf(data->json_file, ", \"%s\":", t->ipfixt->fields[i].elem->ft->name); { char addrbuf[INET6_ADDRSTRLEN]; @@ -185,16 +191,26 @@ int ipfix_export_drecord_jsonlines( ipfixs_node_t *s, } break; case IPFIX_CODING_NTP: + fprintf(data->json_file, ", \"%s\":", t->ipfixt->fields[i].elem->ft->name); json_render_NTP_timestamp_to_FILE(data->json_file, d->addrs[i], d->lens[i]); break; case IPFIX_CODING_STRING: - // don't forget JSON is meant to be UTF-8; IPFIX/Netscaler is ....? - json_render_string_to_FILE(data->json_file, (const char *) d->addrs[i], d->lens[i]); + if ((d->lens[i] > 0) && ((const char *)(d->addrs[i]))[0] != '\0') + { + fprintf(data->json_file, ", \"%s\":", t->ipfixt->fields[i].elem->ft->name); + // don't forget JSON is meant to be UTF-8; IPFIX/Netscaler is ....? + json_render_string_to_FILE(data->json_file, (const char *) d->addrs[i], d->lens[i]); + } break; case IPFIX_CODING_BYTES: - json_render_bytes_as_hexpairs_to_FILE(data->json_file, d->addrs[i], d->lens[i]); + if (d->lens[i] > 0) + { + fprintf(data->json_file, ", \"%s\":", t->ipfixt->fields[i].elem->ft->name); + json_render_bytes_as_hexpairs_to_FILE(data->json_file, d->addrs[i], d->lens[i]); + } break; default: + fprintf(data->json_file, ", \"%s\":", t->ipfixt->fields[i].elem->ft->name); mlogf(1, "[%s] JSON emmission of type %d not currently supported (%s).\n", func, t->ipfixt->fields[i].elem->ft->coding, t->ipfixt->fields[i].elem->ft->name); fprintf(data->json_file, "null"); From 3e1c5d4c6121344f97ea68435d1d3499e2b78294 Mon Sep 17 00:00:00 2001 From: Cameron Kerr Date: Thu, 5 Mar 2015 16:38:49 +1300 Subject: [PATCH 31/48] Remove misplaced declaration of ipfix_export_drecord_jsonfile --- lib/ipfix_col_db.h | 2 -- 1 file changed, 2 deletions(-) diff --git a/lib/ipfix_col_db.h b/lib/ipfix_col_db.h index 8738953..fc27994 100644 --- a/lib/ipfix_col_db.h +++ b/lib/ipfix_col_db.h @@ -22,8 +22,6 @@ int ipfix_export_trecord_db( ipfixs_node_t *s, ipfixt_node_t *t, void *arg ); int ipfix_export_drecord_db( ipfixs_node_t *s, ipfixt_node_t *t, ipfix_datarecord_t *d, void *arg, ipfix_input_t *source ); -int ipfix_export_drecord_jsonfile( ipfixs_node_t *s, ipfixt_node_t *t, - ipfix_datarecord_t *d, void *arg ); void ipfix_export_cleanup_db( void *arg ); int ipfix_export_init_db( char *dbhost, char *dbuser, char *dbpw, char *dbname, From 9b557c4b2676ec386a6765f6d74e2058d0b39f57 Mon Sep 17 00:00:00 2001 From: Cameron Kerr Date: Thu, 5 Mar 2015 16:41:46 +1300 Subject: [PATCH 32/48] Fixed up faulty declaration leading to invalid pointer assignment --- lib/ipfix_col_files.c 
| 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/ipfix_col_files.c b/lib/ipfix_col_files.c index 9036e88..fbf0c3e 100644 --- a/lib/ipfix_col_files.c +++ b/lib/ipfix_col_files.c @@ -143,7 +143,7 @@ static int export_drecord_file( ipfixs_node_t *s, ipfixt_node_t *t, ipfix_datarecord_t *data, void *arg, - ipfix_template_t *source ) + ipfix_input_t *source ) { char tmpbuf[2000]; int i, nbytes; From f53e296c9a07cf260d1be70a844987eb6d084c51 Mon Sep 17 00:00:00 2001 From: Cameron Kerr Date: Thu, 5 Mar 2015 22:04:32 +1300 Subject: [PATCH 33/48] Resolve #27 Print correct timestamps in ipfix_print_newmsg --- lib/ipfix_print.c | 17 +++++++++++------ 1 file changed, 11 insertions(+), 6 deletions(-) diff --git a/lib/ipfix_print.c b/lib/ipfix_print.c index 3eb51be..93712a1 100644 --- a/lib/ipfix_print.c +++ b/lib/ipfix_print.c @@ -18,6 +18,7 @@ #include #include #include +#include #include #include #include @@ -74,26 +75,30 @@ static int ipfix_print_newmsg( ipfixs_node_t *s, ipfix_hdr_t *hdr, void *arg ) { char timebuf[51]; FILE *fp = (FILE*)arg; + struct tm timeval_tm; + time_t message_time; /* don't cast from uint32_t to variable size time_t */ /* print header */ outf( fp, "IPFIX-HDR:\n version=%u,", hdr->version ); if ( hdr->version == IPFIX_VERSION_NF9 ) { outf( fp, " records=%u\n", hdr->u.nf9.count ); + message_time = hdr->u.nf9.unixtime; strftime( timebuf, 40, "%Y-%m-%d %T %Z", - localtime( (const time_t *) &(hdr->u.nf9.unixtime) )); - outf( fp, " sysuptime=%.3fs, unixtime=%lu (%s)\n", + localtime_r( &message_time, &timeval_tm )); + outf( fp, " sysuptime=%.3fs, unixtime=%"PRIu32" (%s)\n", (double)(hdr->u.nf9.sysuptime)/1000.0, - (u_long)hdr->u.nf9.unixtime, timebuf ); + hdr->u.nf9.unixtime, timebuf ); outf( fp, " seqno=%lu,", (u_long)hdr->seqno ); outf( fp, " sourceid=%lu\n", (u_long)hdr->sourceid ); } else { outf( fp, " length=%u\n", hdr->u.ipfix.length ); + message_time = hdr->u.ipfix.exporttime; strftime( timebuf, 40, "%Y-%m-%d %T %Z", - localtime( (const time_t *) &(hdr->u.ipfix.exporttime) )); - outf( fp, " unixtime=%lu (%s)\n", - (u_long)hdr->u.ipfix.exporttime, timebuf ); + localtime_r( &message_time, &timeval_tm )); + outf( fp, " unixtime=%"PRIu32" (%s)\n", + hdr->u.ipfix.exporttime, timebuf ); outf( fp, " seqno=%lu,", (u_long)hdr->seqno ); outf( fp, " odid=%lu\n", (u_long)hdr->sourceid ); } From 9505085977a91f5138a8fe5d3d8e6ca0016a043b Mon Sep 17 00:00:00 2001 From: Cameron Kerr Date: Thu, 5 Mar 2015 22:22:03 +1300 Subject: [PATCH 34/48] Renamed function that didn't match with expected pattern --- lib/ipfix_col_jsonlines.c | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/lib/ipfix_col_jsonlines.c b/lib/ipfix_col_jsonlines.c index ca43be9..9456d40 100644 --- a/lib/ipfix_col_jsonlines.c +++ b/lib/ipfix_col_jsonlines.c @@ -251,7 +251,7 @@ int ipfix_export_newsource_jsonlines( ipfixs_node_t *s, void *arg ) return 0; } -int ipfix_export_notify_no_template_for_set( +int ipfix_export_notify_no_template_for_set_jsonlines( int template_id, ipfixs_node_t * source, const uint8_t * set_start, @@ -348,11 +348,11 @@ int ipfix_col_init_jsonlinesexport( char *jsonfile ) return -1; } - g_colinfo->export_drecord = ipfix_export_drecord_jsonlines; - g_colinfo->export_cleanup = ipfix_export_cleanup_jsonlines; - g_colinfo->export_reload = ipfix_export_reload_jsonlines; - g_colinfo->export_newsource = ipfix_export_newsource_jsonlines; - g_colinfo->export_notify_no_template_for_set = ipfix_export_notify_no_template_for_set; + g_colinfo->export_drecord = 
ipfix_export_drecord_jsonlines; + g_colinfo->export_cleanup = ipfix_export_cleanup_jsonlines; + g_colinfo->export_reload = ipfix_export_reload_jsonlines; + g_colinfo->export_newsource = ipfix_export_newsource_jsonlines; + g_colinfo->export_notify_no_template_for_set = ipfix_export_notify_no_template_for_set_jsonlines; g_colinfo->data = data; From 5ae50a16c0b61b9e811032819edec1efbf7c693f Mon Sep 17 00:00:00 2001 From: Cameron Kerr Date: Thu, 5 Mar 2015 23:06:43 +1300 Subject: [PATCH 35/48] Resolve #26 Timestamps now included --- lib/ipfix_col_jsonlines.c | 57 +++++++++++++++++++++++++++++++++++---- 1 file changed, 52 insertions(+), 5 deletions(-) diff --git a/lib/ipfix_col_jsonlines.c b/lib/ipfix_col_jsonlines.c index 9456d40..0de49aa 100644 --- a/lib/ipfix_col_jsonlines.c +++ b/lib/ipfix_col_jsonlines.c @@ -34,10 +34,15 @@ /*------ structs ---------------------------------------------------------*/ +/* Constant size 1977-09-06T01:02:03.004Z */ +#define JSON_MESSAGE_TIMESTAMP_SIZE 25 +#define JSON_MESSAGE_TIMESTAMP_ATTRIBUTE_NAME "ipfix_timestamp" + typedef struct ipfix_export_data_jsonlines { char *json_filename; FILE *json_file; + char message_timestamp_str[JSON_MESSAGE_TIMESTAMP_SIZE]; } ipfixe_data_jsonlines_t; /*------ globals ---------------------------------------------------------*/ @@ -97,23 +102,26 @@ int ipfix_export_drecord_jsonlines( ipfixs_node_t *s, return -1; } + fprintf(data->json_file, "{\"" JSON_MESSAGE_TIMESTAMP_ATTRIBUTE_NAME "\":\"%s\"", + data->message_timestamp_str); + if (source != NULL && source->type == IPFIX_INPUT_IPCON && source->u.ipcon.addr->sa_family == AF_INET) { inet_ntop( AF_INET, & ((struct sockaddr_in *)(source->u.ipcon.addr))->sin_addr.s_addr, exporter_ip, INET6_ADDRSTRLEN); - fprintf(data->json_file, "{\"ipfix_exporter_ip\":\"%s\"", exporter_ip); + fprintf(data->json_file, ", \"ipfix_exporter_ip\":\"%s\"", exporter_ip); } else if (source != NULL && source->type == IPFIX_INPUT_IPCON && source->u.ipcon.addr->sa_family == AF_INET6) { inet_ntop( AF_INET6, & ((struct sockaddr_in6 *)(source->u.ipcon.addr))->sin6_addr, exporter_ip, INET6_ADDRSTRLEN); - fprintf(data->json_file, "{\"ipfix_exporter_ip\":\"%s\"", exporter_ip); + fprintf(data->json_file, ", \"ipfix_exporter_ip\":\"%s\"", exporter_ip); } else { - fprintf(data->json_file, "{\"ipfix_exporter_ip\":null"); + fprintf(data->json_file, ", \"ipfix_exporter_ip\":null"); } fprintf(data->json_file, ", \"ipfix_template_id\":\"%d\"", t->ipfixt->tid); @@ -237,11 +245,13 @@ int ipfix_export_newsource_jsonlines( ipfixs_node_t *s, void *arg ) /* "ipfix_collector_notify":"newsource" */ fprintf(data->json_file, - "{ \"ipfix_collector_notice\":\"newsource\"" + "{ \"" JSON_MESSAGE_TIMESTAMP_ATTRIBUTE_NAME "\":\"%s\"" + ", \"ipfix_collector_notice\":\"newsource\"" ", \"summary\":\"new source seen %s/%lu\"" ", \"ipfix_exporter_ip\":\"%s\"" ", \"observationDomainId\":%lu" "}\n", + data->message_timestamp_str, ipfix_col_input_get_ident( s->input ), (u_long)s->odid, ipfix_col_input_get_ident( s->input ), (u_long)s->odid ); @@ -267,11 +277,13 @@ int ipfix_export_notify_no_template_for_set_jsonlines( } fprintf(data->json_file, - "{ \"ipfix_collector_notice\":\"no_template_for_set\"" + "{ \"" JSON_MESSAGE_TIMESTAMP_ATTRIBUTE_NAME "\":\"%s\"" + ", \"ipfix_collector_notice\":\"no_template_for_set\"" ", \"ipfix_template_id\":\"%d\"" ", \"ipfix_exporter_ip\":\"%s\"" ", \"summary\":\"no template for %d, skip data set\"" ", \"set_bytes\":", + data->message_timestamp_str, template_id, ipfix_col_input_get_ident( source->input ), 
template_id); @@ -286,6 +298,40 @@ int ipfix_export_notify_no_template_for_set_jsonlines( return 0; } +int ipfix_export_newmsg_jsonlines(ipfixs_node_t * s, ipfix_hdr_t * hdr, void * arg) +{ +#ifdef JSONLINESSUPPORT + ipfixe_data_jsonlines_t *data = (ipfixe_data_jsonlines_t*)arg; + + time_t message_time; + struct timeval timestamp_tv; + struct tm timestamp_tm; + + (void) s; + + if ( hdr->version == IPFIX_VERSION_NF9 ) + { + message_time = hdr->u.nf9.unixtime; + } + else + { + message_time = hdr->u.ipfix.exporttime; + } + + memset(×tamp_tv, 0, sizeof(timestamp_tv)); + + timestamp_tv.tv_sec = message_time; + timestamp_tv.tv_usec = 0; + + gmtime_r(×tamp_tv.tv_sec, ×tamp_tm); + + strftime(data->message_timestamp_str, JSON_MESSAGE_TIMESTAMP_SIZE, + "%Y-%m-%dT%H:%M:%SZ", ×tamp_tm); + +#endif + return 0; +} + int ipfix_export_init_jsonlines( char *jsonfile, void **arg ) { #ifdef JSONLINESSUPPORT @@ -353,6 +399,7 @@ int ipfix_col_init_jsonlinesexport( char *jsonfile ) g_colinfo->export_reload = ipfix_export_reload_jsonlines; g_colinfo->export_newsource = ipfix_export_newsource_jsonlines; g_colinfo->export_notify_no_template_for_set = ipfix_export_notify_no_template_for_set_jsonlines; + g_colinfo->export_newmsg = ipfix_export_newmsg_jsonlines; g_colinfo->data = data; From 5ec4a0a8218e752afdca9661f94ef6aa92b288f1 Mon Sep 17 00:00:00 2001 From: Cameron Kerr Date: Thu, 5 Mar 2015 23:34:36 +1300 Subject: [PATCH 36/48] Resolve #28 'set_bytes' attribute only send if requested --- collector/collector.c | 10 +++++++++- lib/ipfix_col.h | 2 +- lib/ipfix_col_jsonlines.c | 19 +++++++++++++------ 3 files changed, 23 insertions(+), 8 deletions(-) diff --git a/collector/collector.c b/collector/collector.c index cdf19d1..49a85f3 100644 --- a/collector/collector.c +++ b/collector/collector.c @@ -77,6 +77,7 @@ typedef struct ipfix_collector_opts int jsonexport; /* flag */ char *jsonfile; /* filename */ + int json_record_unknown_sets; /* boolean */ int udp; /* support udp clients */ int tcp; /* support tcp packets */ @@ -123,6 +124,7 @@ static void usage( char *taskname) "jsonlines options:\n" " --json export JSON to a file; one JSON doc/line\n" " --jsonfile file to append to, or '-' for stdout\n" + " --json-record-unknown-sets include bytes of sets dropped due to no template\n" #endif #ifdef DBSUPPORT #ifdef HAVE_GETOPT_LONG @@ -241,7 +243,8 @@ int do_collect() #endif #ifdef JSONLINESSUPPORT if ( par.jsonexport ) { - if ( ipfix_col_init_jsonlinesexport( par.jsonfile ) < 0 ) { + if ( ipfix_col_init_jsonlinesexport( par.jsonfile, + par.json_record_unknown_sets ) < 0 ) { mlogf( 0, "[%s] cannot use jsonlines (WHY?)\n", par.progname ); return -1; } @@ -374,6 +377,7 @@ int main (int argc, char *argv[]) { "help", 0, 0, 0}, { "json", 0, 0, 0}, { "jsonfile", 1, 0, 0}, + { "json-record-unknown-sets", 0, 0, 0}, { 0, 0, 0, 0 } }; #endif @@ -401,6 +405,7 @@ int main (int argc, char *argv[]) par.dbpw_filename = NULL; par.jsonexport = 0; par.jsonfile = "-"; + par.json_record_unknown_sets = 0; snprintf( par.progname, sizeof(par.progname), "%s", basename( argv[0]) ); @@ -458,6 +463,9 @@ int main (int argc, char *argv[]) case 13: /* jsonfile */ par.jsonfile = optarg; break; + case 14: /* json-record-unknown-sets */ + par.json_record_unknown_sets = 1; + break; } break; diff --git a/lib/ipfix_col.h b/lib/ipfix_col.h index 918d2c4..7132299 100644 --- a/lib/ipfix_col.h +++ b/lib/ipfix_col.h @@ -118,7 +118,7 @@ int ipfix_col_init_fileexport( char *datadir ); void ipfix_col_stop_fileexport( void ); int ipfix_col_init_mysqlexport( char 
*host, char *user, char *pw, char *name ); void ipfix_col_stop_mysqlexport( void ); -int ipfix_col_init_jsonlinesexport( char *jsonfile ); +int ipfix_col_init_jsonlinesexport( char *jsonfile, int json_record_unknown_sets ); void ipfix_col_stop_jsonlinesexport( void ); void ipfix_col_reload_jsonlinesexport( void ); int ipfix_col_register_export( ipfix_col_info_t *colinfo ); diff --git a/lib/ipfix_col_jsonlines.c b/lib/ipfix_col_jsonlines.c index 0de49aa..97091eb 100644 --- a/lib/ipfix_col_jsonlines.c +++ b/lib/ipfix_col_jsonlines.c @@ -43,6 +43,7 @@ typedef struct ipfix_export_data_jsonlines char *json_filename; FILE *json_file; char message_timestamp_str[JSON_MESSAGE_TIMESTAMP_SIZE]; + int json_record_unknown_sets; } ipfixe_data_jsonlines_t; /*------ globals ---------------------------------------------------------*/ @@ -281,14 +282,17 @@ int ipfix_export_notify_no_template_for_set_jsonlines( ", \"ipfix_collector_notice\":\"no_template_for_set\"" ", \"ipfix_template_id\":\"%d\"" ", \"ipfix_exporter_ip\":\"%s\"" - ", \"summary\":\"no template for %d, skip data set\"" - ", \"set_bytes\":", + ", \"summary\":\"no template for %d, skip data set\"", data->message_timestamp_str, template_id, ipfix_col_input_get_ident( source->input ), template_id); - json_render_bytes_as_hexpairs_to_FILE(data->json_file, set_start, set_len); + if (data->json_record_unknown_sets) + { + fprintf(data->json_file, ", \"set_bytes\":"); + json_render_bytes_as_hexpairs_to_FILE(data->json_file, set_start, set_len); + } fprintf(data->json_file, "}\n"); @@ -332,7 +336,7 @@ int ipfix_export_newmsg_jsonlines(ipfixs_node_t * s, ipfix_hdr_t * hdr, void * a return 0; } -int ipfix_export_init_jsonlines( char *jsonfile, void **arg ) +int ipfix_export_init_jsonlines( char *jsonfile, int json_record_unknown_sets, void **arg ) { #ifdef JSONLINESSUPPORT ipfixe_data_jsonlines_t *data; @@ -342,6 +346,7 @@ int ipfix_export_init_jsonlines( char *jsonfile, void **arg ) data->json_filename = jsonfile; data->json_file = NULL; + data->json_record_unknown_sets = json_record_unknown_sets; *arg = (void**)data; #endif @@ -380,12 +385,14 @@ void ipfix_export_cleanup_jsonlines( void *arg ) /*----- export funcs -----------------------------------------------------*/ -int ipfix_col_init_jsonlinesexport( char *jsonfile ) +int ipfix_col_init_jsonlinesexport( + char *jsonfile, + int json_record_unknown_sets) { #ifdef JSONLINESSUPPORT void *data; - if ( ipfix_export_init_jsonlines(jsonfile, &data) <0 ) { + if ( ipfix_export_init_jsonlines(jsonfile, json_record_unknown_sets, &data) <0 ) { return -1; } From 5cb748041b08b3aa9ed470901639ca762d29acf7 Mon Sep 17 00:00:00 2001 From: Cameron Kerr Date: Tue, 10 Mar 2015 06:57:02 +1300 Subject: [PATCH 37/48] Ignore *.swp files --- .gitignore | 1 + 1 file changed, 1 insertion(+) diff --git a/.gitignore b/.gitignore index 40e5cba..a75db68 100644 --- a/.gitignore +++ b/.gitignore @@ -20,3 +20,4 @@ libipfix.pc netscaler_ipfix.pw data.json autom4te.cache +*.swp From 41cffa2f9637ce75b2a17528b833edfe9c889c18 Mon Sep 17 00:00:00 2001 From: Cameron Kerr Date: Tue, 10 Mar 2015 07:09:17 +1300 Subject: [PATCH 38/48] Corrected some faulty #ifdef nesting in collector.c --- collector/collector.c | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/collector/collector.c b/collector/collector.c index 49a85f3..8801536 100644 --- a/collector/collector.c +++ b/collector/collector.c @@ -127,7 +127,7 @@ static void usage( char *taskname) " --json-record-unknown-sets include bytes of sets dropped due to no template\n" 
#endif #ifdef DBSUPPORT -#ifdef HAVE_GETOPT_LONG +# ifdef HAVE_GETOPT_LONG "db options:\n" " --db export into database\n" " --dbhost db host\n" @@ -135,8 +135,9 @@ static void usage( char *taskname) " --dbuser db user\n" " --dbpw db password\n" " --dbpw-filename db password from first line of file\n" -#else +# else " -d export into database\n" +# endif #endif #ifdef SSLSUPPORT "ssl options:\n" @@ -145,7 +146,6 @@ static void usage( char *taskname) " --cert certificate file to use\n" " --cafile file of CAs\n" " --cadir directory of CAs\n" -#endif #endif "\n"; From 7986526b3c2a76aabc7e8983e7966571c33a2cc3 Mon Sep 17 00:00:00 2001 From: Cameron Kerr Date: Wed, 11 Mar 2015 13:55:50 +1300 Subject: [PATCH 39/48] Resolve #24 Fall back to selected set of templates --- collector/collector.c | 25 +- configure | 26 +- configure.ac | 16 + lib/Makefile.in | 2 +- lib/ipfix.h | 3 +- lib/ipfix_col.c | 129 +++- lib/ipfix_col.h | 18 + lib/ipfix_col_jsonlines.c | 19 +- lib/ipfix_fallback_templates_netscaler.c | 731 +++++++++++++++++++++++ lib/ipfix_fallback_templates_netscaler.h | 9 + 10 files changed, 959 insertions(+), 19 deletions(-) create mode 100644 lib/ipfix_fallback_templates_netscaler.c create mode 100644 lib/ipfix_fallback_templates_netscaler.h diff --git a/collector/collector.c b/collector/collector.c index 8801536..13f406e 100644 --- a/collector/collector.c +++ b/collector/collector.c @@ -59,6 +59,8 @@ #define KEYFILE "server.pem" #define CERTFILE "server.pem" +#define FALLBACK_TEMPLATES_MAX 3 + /*------ stuctures -------------------------------------------------------*/ typedef struct ipfix_collector_opts @@ -78,7 +80,8 @@ typedef struct ipfix_collector_opts int jsonexport; /* flag */ char *jsonfile; /* filename */ int json_record_unknown_sets; /* boolean */ - + char *fallback_templates[FALLBACK_TEMPLATES_MAX]; /* optional ordered array of "netscaler" (only, currently) */ + int fallback_templates_used; int udp; /* support udp clients */ int tcp; /* support tcp packets */ int sctp; /* support sctp clients */ @@ -146,6 +149,10 @@ static void usage( char *taskname) " --cert certificate file to use\n" " --cafile file of CAs\n" " --cadir directory of CAs\n" +#endif +#ifdef FALLBACK_TEMPLATES_SUPPORT + "fallback templates:\n" + " --fallback-templates=netscaler\n" #endif "\n"; @@ -361,6 +368,7 @@ int main (int argc, char *argv[]) char arg; /* short options: character */ int loptidx=0; /* long options: arg==0 and index */ char opt[] = "64stuhl:p:vo:"; + int i; #ifdef HAVE_GETOPT_LONG struct option lopt[] = { { "db", 0, 0, 0}, @@ -378,6 +386,7 @@ int main (int argc, char *argv[]) { "json", 0, 0, 0}, { "jsonfile", 1, 0, 0}, { "json-record-unknown-sets", 0, 0, 0}, + { "fallback-templates", 1, 0, 0}, { 0, 0, 0, 0 } }; #endif @@ -406,6 +415,7 @@ int main (int argc, char *argv[]) par.jsonexport = 0; par.jsonfile = "-"; par.json_record_unknown_sets = 0; + par.fallback_templates_used = 0; snprintf( par.progname, sizeof(par.progname), "%s", basename( argv[0]) ); @@ -466,6 +476,11 @@ int main (int argc, char *argv[]) case 14: /* json-record-unknown-sets */ par.json_record_unknown_sets = 1; break; + case 15: /* fallback-templates= */ + if (par.fallback_templates_used < FALLBACK_TEMPLATES_MAX - 1) { + par.fallback_templates[par.fallback_templates_used] = optarg; /* TODO should get comma-split */ + par.fallback_templates_used++; + } } break; @@ -587,6 +602,14 @@ int main (int argc, char *argv[]) fprintf( stderr, "ipfix_add_vendor_information_elements() failed adding Netscaler reverse types: %s\n", strerror(errno) ); 
exit(1); } + for ( i=0; i < par.fallback_templates_used; i++ ) + { + if ( ipfix_col_add_fallback_templates( par.fallback_templates[i] ) < 0 ) + { + fprintf( stderr, "ipfix_col_add_fallback_templates() failed adding fallback templates for '%s'\n", par.fallback_templates[i] ); + exit(1); + } + } /** signal handler diff --git a/configure b/configure index 10f2f07..01fc83b 100755 --- a/configure +++ b/configure @@ -646,7 +646,8 @@ IPFIX_DB_OBJ MYSQLLIBS IPFIX_SSL_OBJ SSLLIBS -IPFIX_JSONLINES_OBJ +IPFIX_JSONLINES_OBJS +IPFIX_FALLBACK_TEMPLATES_OBJS SCTPLIBS OPENSSL INSTALL_DATA @@ -713,6 +714,7 @@ ac_user_opts=' enable_option_checking enable_ipv6 enable_sctp +enable_fallback_templates enable_jsonlines with_ssl with_mysql @@ -1360,6 +1362,11 @@ Optional Features: --enable-sctp enable sctp support --disable-sctp disable sctp support + --enable-fallback-templates + enable fallback-templates support + --disable-fallback-templates + disable fallback-templates support + --enable-jsonlines enable jsonlines support --disable-jsonlines disable jsonlines support @@ -3102,6 +3109,21 @@ fi +# +# FALLBACK TEMPLATES support +################################################# +# Check whether --enable-fallback-templates was given. +if test "${enable_fallback_templates+set}" = set; then + enableval=$enable_fallback_templates; + if test $enableval != "no" ; then + CPPFLAGS="-DFALLBACK_TEMPLATES_SUPPORT $CPPFLAGS" + IPFIX_FALLBACK_TEMPLATES_OBJS="$IPFIX_FALLBACK_TEMPLATES_OBSJ ipfix_fallback_templates_netscaler.o" + fi + +fi + + + # # JSONLINES support ################################################# @@ -3110,7 +3132,7 @@ if test "${enable_jsonlines+set}" = set; then enableval=$enable_jsonlines; if test $enableval != "no" ; then CPPFLAGS="-DJSONLINESSUPPORT $CPPFLAGS" - IPFIX_JSONLINES_SOURCES="ipfix_col_jsonlines.o json_util.o" + IPFIX_JSONLINES_OBJS="ipfix_jsonlines.o ipfix_col_jsonlines.o json_out.o" fi fi diff --git a/configure.ac b/configure.ac index f5223ea..2b1ac12 100644 --- a/configure.ac +++ b/configure.ac @@ -63,6 +63,22 @@ AS_HELP_STRING([--disable-sctp],[disable sctp support])], []) AC_SUBST(SCTPLIBS) +# +# FALLBACK TEMPLATES support +################################################# +AC_ARG_ENABLE(fallback-templates, +[ +AS_HELP_STRING([--enable-fallback-templates],[enable fallback-templates support]) +AS_HELP_STRING([--disable-fallback-templates],[disable fallback-templates support])], +[ + if test $enableval != "no" ; then + CPPFLAGS="-DFALLBACK_TEMPLATES_SUPPORT $CPPFLAGS" + IPFIX_FALLBACK_TEMPLATES_OBJS="$IPFIX_FALLBACK_TEMPLATES_OBSJ ipfix_fallback_templates_netscaler.o" + fi +], +[]) +AC_SUBST(IPFIX_FALLBACK_TEMPLATES_OBJS) + # # JSONLINES support ################################################# diff --git a/lib/Makefile.in b/lib/Makefile.in index 42a8ca3..920088d 100644 --- a/lib/Makefile.in +++ b/lib/Makefile.in @@ -43,7 +43,7 @@ CFLAGS = $(CCOPT) $(INCLS) $(DEFS) TARGETS = ipfix_reverse_fields.h ipfix_def_fokus.h ipfix_fields_fokus.h ipfix_def_netscaler.h ipfix_fields_netscaler.h ipfix_reverse_fields_netscaler.h libipfix.a libipfix.so SOURCES = ipfix.c ipfix_col.c ipfix_col_jsonlines.c json_out.c ipfix_col_db.c ipfix_col_files.c ipfix_print.c -OBJECTS = $(SOURCES:.c=.o) @IPFIX_JSONLINES_OBJ@ @IPFIX_DB_OBJ@ @IPFIX_SSL_OBJ@ +OBJECTS = $(SOURCES:.c=.o) @IPFIX_JSONLINES_OBJS@ @IPFIX_DB_OBJ@ @IPFIX_SSL_OBJ@ @IPFIX_FALLBACK_TEMPLATES_OBJS@ DHPARAMS = dh512.pem dh1024.pem CLEANFILES = $(TARGETS) *.d *.o *.so *.so.$(VERSION) DISTCLEANFILES = $(CLEANFILES) $(DHPARAMS) Makefile diff --git 
a/lib/ipfix.h b/lib/ipfix.h index 43ffd66..88e0980 100644 --- a/lib/ipfix.h +++ b/lib/ipfix.h @@ -17,8 +17,9 @@ extern "C" { #endif #include -#include #include +#include +#include #ifndef ENOTSUP #define ENOTSUP EOPNOTSUPP diff --git a/lib/ipfix_col.c b/lib/ipfix_col.c index 641e383..629f79a 100644 --- a/lib/ipfix_col.c +++ b/lib/ipfix_col.c @@ -53,6 +53,9 @@ # include "ipfix_jsonlines.h" #endif #include "ipfix_col.h" +#ifdef FALLBACK_TEMPLATES_SUPPORT +# include "ipfix_fallback_templates_netscaler.h" +#endif /*----- defines ----------------------------------------------------------*/ @@ -126,6 +129,8 @@ mptimer_t g_mt; /* timer */ ipfix_col_info_t *g_colinfo =NULL; +ipfixt_node_t *g_fallback_templates = NULL; + #ifdef SCTPSUPPORT sctp_assoc_node_t *sctp_assocs = NULL; /* sctp associations */ #endif @@ -1056,20 +1061,34 @@ int ipfix_parse_msg( ipfix_input_t *input, */ ipfixt_node_t *t; - if ( (t=_get_ipfixt( s->templates, setid )) ==NULL ) { - mlogf( 0, "[%s] no template for %d, skip data set\n", - func, setid ); + /** + * if we can lookup a regular template as advised by protocol, + * or if we can apply a fallback template */ - for ( e=g_exporter; e!=NULL; e=e->next ) { - if ( e->elem->export_notify_no_template_for_set ) - (void) e->elem->export_notify_no_template_for_set( - setid, s, buf+nread, setlen, e->elem->data ); - } + t = _get_ipfixt(s->templates, setid); - nread += setlen; - err_flag = 1; - } - else { + if ( t != NULL ) + { + for ( e = g_exporter; e != NULL; e = e->next ) { + if ( e->elem->export_template_source ) + (void) e->elem->export_template_source( setid, IPFIX_TEMPLATE_SOURCE_PROTOCOL, e->elem->data ); + } + } + else + { + t = _get_ipfixt(g_fallback_templates, setid); + + if ( t != NULL ) + { + for ( e = g_exporter; e != NULL; e = e->next ) { + if ( e->elem->export_template_source ) + (void) e->elem->export_template_source( setid, IPFIX_TEMPLATE_SOURCE_FALLBACK, e->elem->data ); + } + } + } + + if ( t != NULL ) + { for ( e=g_exporter; e!=NULL; e=e->next ) { if ( e->elem->export_dset ) (void) e->elem->export_dset( t, buf+nread, setlen, @@ -1097,6 +1116,22 @@ int ipfix_parse_msg( ipfix_input_t *input, func, i+1, bytesleft ); } nread += setlen; + } + + /** otherwise, we don't have a template and will likely drop the packet */ + else + { + mlogf( 0, "[%s] no template for %d, skip data set\n", + func, setid ); + + for ( e=g_exporter; e!=NULL; e=e->next ) { + if ( e->elem->export_notify_no_template_for_set ) + (void) e->elem->export_notify_no_template_for_set( + setid, s, buf+nread, setlen, e->elem->data ); + } + + nread += setlen; + err_flag = 1; } } else { @@ -2026,6 +2061,76 @@ int ipfix_get_template_ident( ipfix_template_t *t, return 0; } +/* + * name: ipfix_col_add_fallback_templates() + * parameters: a name such as 'netscaler', which will be used to reference an internal + * set of products that are known about. (currently just netscaler) + * return: 0 or -1 on failure. 
+ * remarks: + */ + +int ipfix_col_add_fallback_templates( const char *vendor_or_product_name ) +{ +#ifdef FALLBACK_TEMPLATES_SUPPORT + ipfixt_node_t *new; + int t; + int f; + + if (strcmp(vendor_or_product_name, "netscaler") == 0) + { + for (t = 0; t < netscaler_fallback_template_count; t++) + { + if ((new = calloc(1, sizeof(*new))) == NULL) + return -1; + + new->expire_time = (time_t) (-1); + /** TODO what to do with ident */ +#ifdef DBSUPPORT +# warning Expect bugs here as the database fields havent been set +#endif + + if ((new->ipfixt = calloc(1, sizeof(*(new->ipfixt)))) == NULL) + return -1; + + new->ipfixt->type = DATA_TEMPLATE; + new->ipfixt->odid = 0; + new->ipfixt->tid = netscaler_fallback_templates[t].template_id; + new->ipfixt->ndatafields = netscaler_fallback_templates[t].field_count; + new->ipfixt->nscopefields = 0; + new->ipfixt->nfields = new->ipfixt->ndatafields + new->ipfixt->nscopefields; + new->ipfixt->maxfields = new->ipfixt->nfields; + + new->ipfixt->fields = calloc(new->ipfixt->maxfields, sizeof(*(new->ipfixt->fields))); + if ( new->ipfixt->fields == NULL ) + return -1; + + for (f=0; fipfixt->fields[f].elem = ipfix_get_ftinfo( + netscaler_fallback_templates[t].fields[f].eno, + netscaler_fallback_templates[t].fields[f].ienum); + if (new->ipfixt->fields[f].elem == NULL) + { + new->ipfixt->fields[f].elem = ipfix_create_unknown_ftinfo( + netscaler_fallback_templates[t].fields[f].eno, + netscaler_fallback_templates[t].fields[f].ienum); + new->ipfixt->fields[f].unknown_f = 0; /* fixed fallback */ + } + new->ipfixt->fields[f].flength = + netscaler_fallback_templates[t].fields[f].length; + } + + new->next = g_fallback_templates; + g_fallback_templates = new; + } + } + else + { + return -1; + } +#endif + return 0; +} /* * name: ipfix_col_cleanup() diff --git a/lib/ipfix_col.h b/lib/ipfix_col.h index 7132299..aaac13d 100644 --- a/lib/ipfix_col.h +++ b/lib/ipfix_col.h @@ -12,6 +12,7 @@ #ifndef IPFIX_COL_H #define IPFIX_COL_H +#include #include #include #include @@ -34,6 +35,11 @@ extern "C" { #define MAXTEMPLIDENT 240 +typedef enum { + IPFIX_TEMPLATE_SOURCE_PROTOCOL = 1, + IPFIX_TEMPLATE_SOURCE_FALLBACK +} ipfix_template_source_t; + typedef enum { IPFIX_INPUT_FILE, IPFIX_INPUT_IPCON } ipfix_input_type_t; @@ -64,6 +70,14 @@ typedef struct ipfixt_node #endif } ipfixt_node_t; +#ifdef FALLBACK_TEMPLATES_SUPPORT +typedef struct { + uint16_t template_id; + uint16_t field_count; + export_fields_t fields[50]; /* FIXME: can't have a flexible array nested inside another flexible array, so overestimate */ +} fallback_template_t; +#endif + typedef struct ipfixs_node { struct ipfixs_node *next; @@ -97,6 +111,7 @@ typedef struct ipfix_col_info ipfix_datarecord_t*,void*,ipfix_input_t*); int (*export_rawmsg)(ipfixs_node_t *source, const uint8_t* data, size_t len, void *arg); int (*export_notify_no_template_for_set)(int,ipfixs_node_t*,const uint8_t*,size_t,void*); + void (*export_template_source)(int template_id, ipfix_template_source_t source, void *arg); void (*export_cleanup)(void*); void (*export_reload)(void*); void *data; @@ -146,6 +161,9 @@ int ipfix_col_close_ssl( ipfix_col_t *handle ); const char *ipfix_col_input_get_ident( ipfix_input_t *input ); +int ipfix_col_add_fallback_templates( const char *vendor_or_product_name ); + + #ifdef DBSUPPORT # include #endif diff --git a/lib/ipfix_col_jsonlines.c b/lib/ipfix_col_jsonlines.c index 97091eb..efc8925 100644 --- a/lib/ipfix_col_jsonlines.c +++ b/lib/ipfix_col_jsonlines.c @@ -44,6 +44,7 @@ typedef struct ipfix_export_data_jsonlines FILE 
*json_file; char message_timestamp_str[JSON_MESSAGE_TIMESTAMP_SIZE]; int json_record_unknown_sets; + ipfix_template_source_t template_source; } ipfixe_data_jsonlines_t; /*------ globals ---------------------------------------------------------*/ @@ -127,6 +128,8 @@ int ipfix_export_drecord_jsonlines( ipfixs_node_t *s, fprintf(data->json_file, ", \"ipfix_template_id\":\"%d\"", t->ipfixt->tid); + fprintf(data->json_file, ", \"ipfix_template_source\":\"%s\"", data->template_source ? "fallback" : "protocol"); + /* TODO The first attribute should be the template number. */ @@ -331,10 +334,22 @@ int ipfix_export_newmsg_jsonlines(ipfixs_node_t * s, ipfix_hdr_t * hdr, void * a strftime(data->message_timestamp_str, JSON_MESSAGE_TIMESTAMP_SIZE, "%Y-%m-%dT%H:%M:%SZ", ×tamp_tm); - #endif return 0; } + +void ipfix_export_template_source_jsonlines(int template_id, ipfix_template_source_t source, void * arg) +{ +#ifdef JSONLINESSUPPORT + ipfixe_data_jsonlines_t *data = (ipfixe_data_jsonlines_t*)arg; + + fprintf(stderr, "Template source is %s for template ID %d\n", + (source == IPFIX_TEMPLATE_SOURCE_PROTOCOL) ? "protocol" : "fallback", template_id); + + data->template_source = source; +#endif +} + int ipfix_export_init_jsonlines( char *jsonfile, int json_record_unknown_sets, void **arg ) { @@ -382,7 +397,6 @@ void ipfix_export_cleanup_jsonlines( void *arg ) #endif } - /*----- export funcs -----------------------------------------------------*/ int ipfix_col_init_jsonlinesexport( @@ -407,6 +421,7 @@ int ipfix_col_init_jsonlinesexport( g_colinfo->export_newsource = ipfix_export_newsource_jsonlines; g_colinfo->export_notify_no_template_for_set = ipfix_export_notify_no_template_for_set_jsonlines; g_colinfo->export_newmsg = ipfix_export_newmsg_jsonlines; + g_colinfo->export_template_source = ipfix_export_template_source_jsonlines; g_colinfo->data = data; diff --git a/lib/ipfix_fallback_templates_netscaler.c b/lib/ipfix_fallback_templates_netscaler.c new file mode 100644 index 0000000..f4ee1b6 --- /dev/null +++ b/lib/ipfix_fallback_templates_netscaler.c @@ -0,0 +1,731 @@ +#include "lib/ipfix.h" +#include "lib/ipfix_col.h" + +int netscaler_fallback_template_count = 29; + +fallback_template_t netscaler_fallback_templates[] = + { { 256 + , 23 + , { { 0, 138, 4 } /* observationPointId */ + , { 0, 144, 4 } /* exportingProcessId */ + , { 0, 148, 8 } /* flowId */ + , { 5951, 129, 4 } /* netscaler_transaction_id */ + , { 5951, 133, 4 } /* netscaler_connection_id */ + , { 0, 60, 1 } /* ipVersion */ + , { 0, 4, 1 } /* protocolIdentifier */ + , { 0, 210, 2 } /* paddingOctets */ + , { 0, 8, 4 } /* sourceIPv4Address */ + , { 0, 12, 4 } /* destinationIPv4Address */ + , { 0, 7, 2 } /* sourceTransportPort */ + , { 0, 11, 2 } /* destinationTransportPort */ + , { 0, 2, 8 } /* packetDeltaCount */ + , { 0, 1, 8 } /* octetDeltaCount */ + , { 0, 6, 1 } /* tcpControlBits */ + , { 5951, 132, 8 } /* netscaler_flow_flags */ + , { 0, 154, 8 } /* flowStartMicroseconds */ + , { 0, 155, 8 } /* flowEndMicroseconds */ + , { 0, 10, 4 } /* ingressInterface */ + , { 0, 14, 4 } /* egressInterface */ + , { 5951, 151, 4 } /* netscaler_app_name_app_id */ + , { 5951, 192, 16 } /* netscaler_connection_chain_id */ + , { 5951, 193, 1 } /* netscaler_connection_chain_hop_count */ + } + } + , { 257 + , 24 + , { { 0, 138, 4 } /* observationPointId */ + , { 0, 144, 4 } /* exportingProcessId */ + , { 0, 148, 8 } /* flowId */ + , { 5951, 129, 4 } /* netscaler_transaction_id */ + , { 5951, 133, 4 } /* netscaler_connection_id */ + , { 0, 60, 1 } /* ipVersion */ 
+ , { 0, 4, 1 } /* protocolIdentifier */ + , { 0, 210, 2 } /* paddingOctets */ + , { 0, 8, 4 } /* sourceIPv4Address */ + , { 0, 12, 4 } /* destinationIPv4Address */ + , { 0, 7, 2 } /* sourceTransportPort */ + , { 0, 11, 2 } /* destinationTransportPort */ + , { 0, 2, 8 } /* packetDeltaCount */ + , { 0, 1, 8 } /* octetDeltaCount */ + , { 0, 6, 1 } /* tcpControlBits */ + , { 5951, 132, 8 } /* netscaler_flow_flags */ + , { 0, 154, 8 } /* flowStartMicroseconds */ + , { 0, 155, 8 } /* flowEndMicroseconds */ + , { 5951, 128, 4 } /* netscaler_round_trip_time */ + , { 0, 14, 4 } /* egressInterface */ + , { 0, 10, 4 } /* ingressInterface */ + , { 5951, 151, 4 } /* netscaler_app_name_app_id */ + , { 5951, 192, 16 } /* netscaler_connection_chain_id */ + , { 5951, 193, 1 } /* netscaler_connection_chain_hop_count */ + } + } + , { 258 + , 37 /* FIXME some say 38 (with netscaler_aaa_username) and some (that usually need a fallback) imply 37 (without netscaler_aaa_username), so reducing to 37 */ + , { { 0, 138, 4 } /* observationPointId */ + , { 0, 144, 4 } /* exportingProcessId */ + , { 0, 148, 8 } /* flowId */ + , { 5951, 129, 4 } /* netscaler_transaction_id */ + , { 5951, 133, 4 } /* netscaler_connection_id */ + , { 0, 60, 1 } /* ipVersion */ + , { 0, 4, 1 } /* protocolIdentifier */ + , { 0, 210, 2 } /* paddingOctets */ + , { 0, 8, 4 } /* sourceIPv4Address */ + , { 0, 12, 4 } /* destinationIPv4Address */ + , { 0, 7, 2 } /* sourceTransportPort */ + , { 0, 11, 2 } /* destinationTransportPort */ + , { 0, 2, 8 } /* packetDeltaCount */ + , { 0, 1, 8 } /* octetDeltaCount */ + , { 0, 6, 1 } /* tcpControlBits */ + , { 5951, 132, 8 } /* netscaler_flow_flags */ + , { 0, 154, 8 } /* flowStartMicroseconds */ + , { 0, 155, 8 } /* flowEndMicroseconds */ + , { 0, 10, 4 } /* ingressInterface */ + , { 0, 14, 4 } /* egressInterface */ + , { 5951, 151, 4 } /* netscaler_app_name_app_id */ + , { 5951, 171, 4 } /* netscaler_app_unit_name_app_id */ + , { 5951, 158, 8 } /* netscaler_http_res_forw_fb */ + , { 5951, 170, 8 } /* netscaler_http_res_forw_lb */ + , { 5951, 192, 16 } /* netscaler_connection_chain_id */ + , { 5951, 193, 1 } /* netscaler_connection_chain_hop_count */ + /* , { 5951, 205, 65535 } netscaler_aaa_username */ + , { 5951, 130, 65535 } /* netscaler_http_req_url */ + , { 5951, 131, 65535 } /* netscaler_http_req_cookie */ + , { 5951, 140, 65535 } /* netscaler_http_req_referer */ + , { 5951, 141, 65535 } /* netscaler_http_req_method */ + , { 5951, 142, 65535 } /* netscaler_http_req_host */ + , { 5951, 143, 65535 } /* netscaler_http_req_user_agent */ + , { 5951, 183, 65535 } /* netscaler_http_content_type */ + , { 5951, 185, 65535 } /* netscaler_http_req_authorization */ + , { 5951, 186, 65535 } /* netscaler_http_req_via */ + , { 5951, 190, 65535 } /* netscaler_http_req_x_forwarded_for */ + , { 5951, 267, 65535 } /* netscaler_http_domain_name */ + } + } + , { 259 + , 23 + , { { 0, 138, 4 } /* observationPointId */ + , { 0, 144, 4 } /* exportingProcessId */ + , { 0, 148, 8 } /* flowId */ + , { 5951, 129, 4 } /* netscaler_transaction_id */ + , { 5951, 133, 4 } /* netscaler_connection_id */ + , { 0, 60, 1 } /* ipVersion */ + , { 0, 4, 1 } /* protocolIdentifier */ + , { 0, 210, 2 } /* paddingOctets */ + , { 0, 27, 16 } /* sourceIPv6Address */ + , { 0, 28, 16 } /* destinationIPv6Address */ + , { 0, 7, 2 } /* sourceTransportPort */ + , { 0, 11, 2 } /* destinationTransportPort */ + , { 0, 2, 8 } /* packetDeltaCount */ + , { 0, 1, 8 } /* octetDeltaCount */ + , { 0, 6, 1 } /* tcpControlBits */ + , { 5951, 132, 8 } /* 
netscaler_flow_flags */ + , { 0, 154, 8 } /* flowStartMicroseconds */ + , { 0, 155, 8 } /* flowEndMicroseconds */ + , { 0, 10, 4 } /* ingressInterface */ + , { 0, 14, 4 } /* egressInterface */ + , { 5951, 151, 4 } /* netscaler_app_name_app_id */ + , { 5951, 192, 16 } /* netscaler_connection_chain_id */ + , { 5951, 193, 1 } /* netscaler_connection_chain_hop_count */ + } + } + , { 260 + , 24 + , { { 0, 138, 4 } /* observationPointId */ + , { 0, 144, 4 } /* exportingProcessId */ + , { 0, 148, 8 } /* flowId */ + , { 5951, 129, 4 } /* netscaler_transaction_id */ + , { 5951, 133, 4 } /* netscaler_connection_id */ + , { 0, 60, 1 } /* ipVersion */ + , { 0, 4, 1 } /* protocolIdentifier */ + , { 0, 210, 2 } /* paddingOctets */ + , { 0, 27, 16 } /* sourceIPv6Address */ + , { 0, 28, 16 } /* destinationIPv6Address */ + , { 0, 7, 2 } /* sourceTransportPort */ + , { 0, 11, 2 } /* destinationTransportPort */ + , { 0, 2, 8 } /* packetDeltaCount */ + , { 0, 1, 8 } /* octetDeltaCount */ + , { 0, 6, 1 } /* tcpControlBits */ + , { 5951, 132, 8 } /* netscaler_flow_flags */ + , { 0, 154, 8 } /* flowStartMicroseconds */ + , { 0, 155, 8 } /* flowEndMicroseconds */ + , { 5951, 128, 4 } /* netscaler_round_trip_time */ + , { 0, 14, 4 } /* egressInterface */ + , { 0, 10, 4 } /* ingressInterface */ + , { 5951, 151, 4 } /* netscaler_app_name_app_id */ + , { 5951, 192, 16 } /* netscaler_connection_chain_id */ + , { 5951, 193, 1 } /* netscaler_connection_chain_hop_count */ + } + } + , { 261 + , 40 + , { { 0, 138, 4 } /* observationPointId */ + , { 0, 144, 4 } /* exportingProcessId */ + , { 0, 148, 8 } /* flowId */ + , { 5951, 129, 4 } /* netscaler_transaction_id */ + , { 5951, 133, 4 } /* netscaler_connection_id */ + , { 0, 60, 1 } /* ipVersion */ + , { 0, 4, 1 } /* protocolIdentifier */ + , { 0, 210, 2 } /* paddingOctets */ + , { 0, 27, 16 } /* sourceIPv6Address */ + , { 0, 28, 16 } /* destinationIPv6Address */ + , { 0, 7, 2 } /* sourceTransportPort */ + , { 0, 11, 2 } /* destinationTransportPort */ + , { 0, 2, 8 } /* packetDeltaCount */ + , { 0, 1, 8 } /* octetDeltaCount */ + , { 0, 6, 1 } /* tcpControlBits */ + , { 5951, 132, 8 } /* netscaler_flow_flags */ + , { 0, 154, 8 } /* flowStartMicroseconds */ + , { 0, 155, 8 } /* flowEndMicroseconds */ + , { 0, 10, 4 } /* ingressInterface */ + , { 0, 14, 4 } /* egressInterface */ + , { 5951, 151, 4 } /* netscaler_app_name_app_id */ + , { 5951, 171, 4 } /* netscaler_app_unit_name_app_id */ + , { 5951, 158, 8 } /* netscaler_http_res_forw_fb */ + , { 5951, 170, 8 } /* netscaler_http_res_forw_lb */ + , { 5951, 192, 16 } /* netscaler_connection_chain_id */ + , { 5951, 193, 1 } /* netscaler_connection_chain_hop_count */ + , { 5951, 268, 4 } /* netscaler_cache_redir_client_connection_core_id */ + , { 5951, 269, 4 } /* netscaler_cache_redir_client_connection_transaction_id */ + , { 5951, 205, 65535 } /* netscaler_aaa_username */ + , { 5951, 130, 65535 } /* netscaler_http_req_url */ + , { 5951, 131, 65535 } /* netscaler_http_req_cookie */ + , { 5951, 140, 65535 } /* netscaler_http_req_referer */ + , { 5951, 141, 65535 } /* netscaler_http_req_method */ + , { 5951, 142, 65535 } /* netscaler_http_req_host */ + , { 5951, 143, 65535 } /* netscaler_http_req_user_agent */ + , { 5951, 183, 65535 } /* netscaler_http_content_type */ + , { 5951, 185, 65535 } /* netscaler_http_req_authorization */ + , { 5951, 186, 65535 } /* netscaler_http_req_via */ + , { 5951, 190, 65535 } /* netscaler_http_req_x_forwarded_for */ + , { 5951, 267, 65535 } /* netscaler_http_domain_name */ + } + } + , { 262 + , 39 
+ , { { 0, 138, 4 } /* observationPointId */ + , { 0, 144, 4 } /* exportingProcessId */ + , { 0, 148, 8 } /* flowId */ + , { 5951, 129, 4 } /* netscaler_transaction_id */ + , { 5951, 133, 4 } /* netscaler_connection_id */ + , { 0, 60, 1 } /* ipVersion */ + , { 0, 4, 1 } /* protocolIdentifier */ + , { 0, 210, 2 } /* paddingOctets */ + , { 0, 8, 4 } /* sourceIPv4Address */ + , { 0, 12, 4 } /* destinationIPv4Address */ + , { 0, 7, 2 } /* sourceTransportPort */ + , { 0, 11, 2 } /* destinationTransportPort */ + , { 0, 2, 8 } /* packetDeltaCount */ + , { 0, 1, 8 } /* octetDeltaCount */ + , { 0, 6, 1 } /* tcpControlBits */ + , { 5951, 132, 8 } /* netscaler_flow_flags */ + , { 0, 154, 8 } /* flowStartMicroseconds */ + , { 0, 155, 8 } /* flowEndMicroseconds */ + , { 0, 10, 4 } /* ingressInterface */ + , { 0, 14, 4 } /* egressInterface */ + , { 5951, 144, 2 } /* netscaler_http_rsp_status */ + , { 5951, 145, 8 } /* netscaler_http_rsp_len */ + , { 5951, 146, 8 } /* netscaler_server_ttfb */ + , { 5951, 147, 8 } /* netscaler_server_ttlb */ + , { 5951, 151, 4 } /* netscaler_app_name_app_id */ + , { 5951, 161, 4 } /* netscaler_main_page_id */ + , { 5951, 162, 4 } /* netscaler_main_page_coreid */ + , { 5951, 153, 8 } /* netscaler_http_req_rcv_fb */ + , { 5951, 156, 8 } /* netscaler_http_req_forw_fb */ + , { 5951, 157, 8 } /* netscaler_http_res_rcv_fb */ + , { 5951, 159, 8 } /* netscaler_http_req_rcv_lb */ + , { 5951, 160, 8 } /* netscaler_http_req_forw_lb */ + , { 5951, 169, 8 } /* netscaler_http_res_rcv_lb */ + , { 5951, 182, 4 } /* netscaler_client_rtt */ + , { 5951, 205, 65535 } /* netscaler_aaa_username */ + , { 5951, 183, 65535 } /* netscaler_http_content_type */ + , { 5951, 187, 65535 } /* netscaler_http_res_location */ + , { 5951, 188, 65535 } /* netscaler_http_res_set_cookie */ + , { 5951, 189, 65535 } /* netscaler_http_res_set_cookie2 */ + } + } + , { 263 + , 39 + , { { 0, 138, 4 } /* observationPointId */ + , { 0, 144, 4 } /* exportingProcessId */ + , { 0, 148, 8 } /* flowId */ + , { 5951, 129, 4 } /* netscaler_transaction_id */ + , { 5951, 133, 4 } /* netscaler_connection_id */ + , { 0, 60, 1 } /* ipVersion */ + , { 0, 4, 1 } /* protocolIdentifier */ + , { 0, 210, 2 } /* paddingOctets */ + , { 0, 27, 16 } /* sourceIPv6Address */ + , { 0, 28, 16 } /* destinationIPv6Address */ + , { 0, 7, 2 } /* sourceTransportPort */ + , { 0, 11, 2 } /* destinationTransportPort */ + , { 0, 2, 8 } /* packetDeltaCount */ + , { 0, 1, 8 } /* octetDeltaCount */ + , { 0, 6, 1 } /* tcpControlBits */ + , { 5951, 132, 8 } /* netscaler_flow_flags */ + , { 0, 154, 8 } /* flowStartMicroseconds */ + , { 0, 155, 8 } /* flowEndMicroseconds */ + , { 0, 10, 4 } /* ingressInterface */ + , { 0, 14, 4 } /* egressInterface */ + , { 5951, 144, 2 } /* netscaler_http_rsp_status */ + , { 5951, 145, 8 } /* netscaler_http_rsp_len */ + , { 5951, 146, 8 } /* netscaler_server_ttfb */ + , { 5951, 147, 8 } /* netscaler_server_ttlb */ + , { 5951, 151, 4 } /* netscaler_app_name_app_id */ + , { 5951, 161, 4 } /* netscaler_main_page_id */ + , { 5951, 162, 4 } /* netscaler_main_page_coreid */ + , { 5951, 153, 8 } /* netscaler_http_req_rcv_fb */ + , { 5951, 156, 8 } /* netscaler_http_req_forw_fb */ + , { 5951, 157, 8 } /* netscaler_http_res_rcv_fb */ + , { 5951, 159, 8 } /* netscaler_http_req_rcv_lb */ + , { 5951, 160, 8 } /* netscaler_http_req_forw_lb */ + , { 5951, 169, 8 } /* netscaler_http_res_rcv_lb */ + , { 5951, 182, 4 } /* netscaler_client_rtt */ + , { 5951, 205, 65535 } /* netscaler_aaa_username */ + , { 5951, 183, 65535 } /* 
netscaler_http_content_type */ + , { 5951, 187, 65535 } /* netscaler_http_res_location */ + , { 5951, 188, 65535 } /* netscaler_http_res_set_cookie */ + , { 5951, 189, 65535 } /* netscaler_http_res_set_cookie2 */ + } + } + , { 264 + , 5 + , { { 5951, 134, 1 } /* netscaler_syslog_priority */ + , { 0, 210, 1 } /* paddingOctets */ + , { 0, 210, 2 } /* paddingOctets */ + , { 5951, 136, 4 } /* netscaler_syslog_timestamp */ + , { 5951, 135, 65535 } /* netscaler_syslog_message */ + } + } + , { 266 + , 4 + , { { 0, 144, 4 } /* exportingProcessId */ + , { 5951, 129, 4 } /* netscaler_transaction_id */ + , { 5951, 168, 65535 } /* netscaler_http_client_interaction_end_time */ + , { 5951, 163, 65535 } /* netscaler_http_client_interaction_start_time */ + } + } + , { 267 + , 4 + , { { 0, 144, 4 } /* exportingProcessId */ + , { 5951, 129, 4 } /* netscaler_transaction_id */ + , { 5951, 164, 65535 } /* netscaler_http_client_render_end_time */ + , { 5951, 165, 65535 } /* netscaler_http_client_render_start_time */ + } + } + , { 269 + , 25 + , { { 0, 138, 4 } /* observationPointId */ + , { 0, 144, 4 } /* exportingProcessId */ + , { 0, 148, 8 } /* flowId */ + , { 5951, 129, 4 } /* netscaler_transaction_id */ + , { 5951, 133, 4 } /* netscaler_connection_id */ + , { 0, 60, 1 } /* ipVersion */ + , { 0, 61, 1 } /* flowDirection */ + , { 0, 4, 1 } /* protocolIdentifier */ + , { 0, 6, 1 } /* tcpControlBits */ + , { 0, 8, 4 } /* sourceIPv4Address */ + , { 0, 12, 4 } /* destinationIPv4Address */ + , { 0, 7, 2 } /* sourceTransportPort */ + , { 0, 11, 2 } /* destinationTransportPort */ + , { 0, 2, 8 } /* packetDeltaCount */ + , { 0, 1, 8 } /* octetDeltaCount */ + , { 5951, 132, 8 } /* netscaler_flow_flags */ + , { 0, 154, 8 } /* flowStartMicroseconds */ + , { 0, 155, 8 } /* flowEndMicroseconds */ + , { 0, 10, 4 } /* ingressInterface */ + , { 0, 14, 4 } /* egressInterface */ + , { 5951, 151, 4 } /* netscaler_app_name_app_id */ + , { 5951, 174, 1 } /* netscaler_db_protocol_name */ + , { 5951, 173, 1 } /* netscaler_db_req_type */ + , { 0, 210, 2 } /* paddingOctets */ + , { 5951, 178, 65535 } /* netscaler_db_req_string */ + } + } + , { 270 + , 29 + , { { 0, 138, 4 } /* observationPointId */ + , { 0, 144, 4 } /* exportingProcessId */ + , { 0, 148, 8 } /* flowId */ + , { 5951, 129, 4 } /* netscaler_transaction_id */ + , { 5951, 133, 4 } /* netscaler_connection_id */ + , { 0, 60, 1 } /* ipVersion */ + , { 0, 61, 1 } /* flowDirection */ + , { 0, 4, 1 } /* protocolIdentifier */ + , { 0, 6, 1 } /* tcpControlBits */ + , { 0, 8, 4 } /* sourceIPv4Address */ + , { 0, 12, 4 } /* destinationIPv4Address */ + , { 0, 7, 2 } /* sourceTransportPort */ + , { 0, 11, 2 } /* destinationTransportPort */ + , { 0, 2, 8 } /* packetDeltaCount */ + , { 0, 1, 8 } /* octetDeltaCount */ + , { 5951, 132, 8 } /* netscaler_flow_flags */ + , { 0, 154, 8 } /* flowStartMicroseconds */ + , { 0, 155, 8 } /* flowEndMicroseconds */ + , { 0, 10, 4 } /* ingressInterface */ + , { 0, 14, 4 } /* egressInterface */ + , { 5951, 151, 4 } /* netscaler_app_name_app_id */ + , { 5951, 146, 8 } /* netscaler_server_ttfb */ + , { 5951, 147, 8 } /* netscaler_server_ttlb */ + , { 5951, 174, 1 } /* netscaler_db_protocol_name */ + , { 0, 210, 1 } /* paddingOctets */ + , { 0, 210, 2 } /* paddingOctets */ + , { 5951, 180, 8 } /* netscaler_db_resp_status */ + , { 5951, 181, 8 } /* netscaler_db_resp_length */ + , { 5951, 179, 65535 } /* netscaler_db_resp_status_string */ + } + } + , { 271 + , 26 + , { { 0, 138, 4 } /* observationPointId */ + , { 0, 144, 4 } /* exportingProcessId */ + , 
{ 0, 148, 8 } /* flowId */ + , { 5951, 133, 4 } /* netscaler_connection_id */ + , { 0, 60, 1 } /* ipVersion */ + , { 0, 4, 1 } /* protocolIdentifier */ + , { 0, 210, 2 } /* paddingOctets */ + , { 0, 8, 4 } /* sourceIPv4Address */ + , { 0, 12, 4 } /* destinationIPv4Address */ + , { 0, 7, 2 } /* sourceTransportPort */ + , { 0, 11, 2 } /* destinationTransportPort */ + , { 5951, 200, 16 } /* netscaler_ica_session_guid */ + , { 5951, 247, 4 } /* netscaler_ica_device_serial_no */ + , { 5951, 248, 16 } /* netscaler_msi_client_cookie */ + , { 5951, 249, 8 } /* netscaler_ica_flags */ + , { 5951, 209, 4 } /* netscaler_ica_session_setup_time */ + , { 5951, 203, 4 } /* netscaler_ica_client_ip */ + , { 5951, 202, 2 } /* netscaler_ica_client_type */ + , { 5951, 208, 2 } /* netscaler_ica_client_launcher */ + , { 5951, 192, 16 } /* netscaler_connection_chain_id */ + , { 5951, 193, 1 } /* netscaler_connection_chain_hop_count */ + , { 5951, 201, 65535 } /* NETSCALE_ICA_CLIENT_VERSION */ + , { 5951, 204, 65535 } /* netscaler_ica_client_hostname */ + , { 5951, 250, 65535 } /* netscaler_ica_username */ + , { 5951, 207, 65535 } /* netscaler_ica_domain_name */ + , { 5951, 210, 65535 } /* netscaler_ica_server_name */ + } + } + , { 272 + , 34 + , { { 0, 138, 4 } /* observationPointId */ + , { 0, 144, 4 } /* exportingProcessId */ + , { 0, 148, 8 } /* flowId */ + , { 5951, 133, 4 } /* netscaler_connection_id */ + , { 0, 60, 1 } /* ipVersion */ + , { 0, 4, 1 } /* protocolIdentifier */ + , { 0, 210, 2 } /* paddingOctets */ + , { 5951, 200, 16 } /* netscaler_ica_session_guid */ + , { 5951, 247, 4 } /* netscaler_ica_device_serial_no */ + , { 5951, 248, 16 } /* netscaler_msi_client_cookie */ + , { 5951, 249, 8 } /* netscaler_ica_flags */ + , { 5951, 214, 1 } /* netscaler_ica_session_reconnects */ + , { 5951, 215, 4 } /* netscaler_ica_rtt */ + , { 5951, 216, 4 } /* netscaler_ica_client_side_rx_bytes */ + , { 5951, 217, 4 } /* netscaler_ica_client_side_tx_bytes */ + , { 5951, 219, 2 } /* netscaler_ica_client_side_packets_retransmit */ + , { 5951, 220, 2 } /* netscaler_ica_server_side_packets_retransmit */ + , { 5951, 221, 4 } /* netscaler_ica_client_side_rtt */ + , { 5951, 222, 4 } /* netscaler_ica_server_side_rtt */ + , { 5951, 243, 4 } /* netscaler_ica_client_side_jitter */ + , { 5951, 244, 4 } /* netscaler_ica_server_side_jitter */ + , { 5951, 254, 4 } /* netscaler_ica_network_update_start_time */ + , { 5951, 255, 4 } /* netscaler_ica_network_update_end_time */ + , { 5951, 256, 4 } /* netscaler_ica_client_side_srtt */ + , { 5951, 257, 4 } /* netscaler_ica_server_side_srtt */ + , { 5951, 258, 4 } /* netscaler_ica_client_side_delay */ + , { 5951, 259, 4 } /* netscaler_ica_server_side_delay */ + , { 5951, 260, 4 } /* netscaler_ica_host_delay */ + , { 5951, 261, 2 } /* netscaler_ica_clientside_window_size */ + , { 5951, 262, 2 } /* netscaler_ica_server_side_window_size */ + , { 5951, 263, 2 } /* netscaler_ica_client_side_rto_count */ + , { 5951, 264, 2 } /* netscaler_ica_server_side_rto_count */ + , { 5951, 265, 4 } /* netscaler_ica_l7_client_latency */ + , { 5951, 266, 4 } /* netscaler_ica_l7_server_latency */ + } + } + , { 273 + , 23 + , { { 0, 138, 4 } /* observationPointId */ + , { 0, 144, 4 } /* exportingProcessId */ + , { 0, 148, 8 } /* flowId */ + , { 5951, 133, 4 } /* netscaler_connection_id */ + , { 0, 60, 1 } /* ipVersion */ + , { 0, 4, 1 } /* protocolIdentifier */ + , { 0, 210, 2 } /* paddingOctets */ + , { 5951, 200, 16 } /* netscaler_ica_session_guid */ + , { 5951, 247, 4 } /* netscaler_ica_device_serial_no */ 
+ , { 5951, 248, 16 } /* netscaler_msi_client_cookie */ + , { 5951, 249, 8 } /* netscaler_ica_flags */ + , { 5951, 223, 4 } /* netscaler_ica_session_update_begin_sec */ + , { 5951, 224, 4 } /* netscaler_ica_session_update_end_sec */ + , { 5951, 225, 4 } /* netscaler_ica_channel_id_1 */ + , { 5951, 226, 4 } /* netscaler_ica_channel_id_1_bytes */ + , { 5951, 227, 4 } /* netscaler_ica_channel_id_2 */ + , { 5951, 228, 4 } /* netscaler_ica_channel_id_2_bytes */ + , { 5951, 229, 4 } /* netscaler_ica_channel_id_3 */ + , { 5951, 230, 4 } /* netscaler_ica_channel_id_3_bytes */ + , { 5951, 231, 4 } /* netscaler_ica_channel_id_4 */ + , { 5951, 232, 4 } /* netscaler_ica_channel_id_4_bytes */ + , { 5951, 233, 4 } /* netscaler_ica_channel_id_5 */ + , { 5951, 234, 4 } /* netscaler_ica_channel_id_5_bytes */ + } + } + , { 274 + , 22 + , { { 0, 138, 4 } /* observationPointId */ + , { 0, 144, 4 } /* exportingProcessId */ + , { 0, 148, 8 } /* flowId */ + , { 5951, 133, 4 } /* netscaler_connection_id */ + , { 0, 60, 1 } /* ipVersion */ + , { 0, 4, 1 } /* protocolIdentifier */ + , { 0, 210, 2 } /* paddingOctets */ + , { 0, 8, 4 } /* sourceIPv4Address */ + , { 0, 12, 4 } /* destinationIPv4Address */ + , { 0, 7, 2 } /* sourceTransportPort */ + , { 0, 11, 2 } /* destinationTransportPort */ + , { 5951, 200, 16 } /* netscaler_ica_session_guid */ + , { 5951, 247, 4 } /* netscaler_ica_device_serial_no */ + , { 5951, 248, 16 } /* netscaler_msi_client_cookie */ + , { 5951, 249, 8 } /* netscaler_ica_flags */ + , { 5951, 235, 2 } /* netscaler_ica_connection_priority */ + , { 5951, 236, 4 } /* netscaler_application_startup_duration */ + , { 5951, 237, 2 } /* netscaler_ica_launch_mechanism */ + , { 5951, 239, 4 } /* netscaler_application_startup_time */ + , { 5951, 245, 4 } /* netscaler_ica_app_process_id */ + , { 5951, 238, 65535 } /* netscaler_ica_application_name */ + , { 5951, 246, 65535 } /* netscaler_ica_app_module_path */ + } + } + , { 275 + , 14 + , { { 0, 138, 4 } /* observationPointId */ + , { 0, 144, 4 } /* exportingProcessId */ + , { 0, 148, 8 } /* flowId */ + , { 5951, 133, 4 } /* netscaler_connection_id */ + , { 0, 60, 1 } /* ipVersion */ + , { 0, 4, 1 } /* protocolIdentifier */ + , { 0, 210, 2 } /* paddingOctets */ + , { 5951, 200, 16 } /* netscaler_ica_session_guid */ + , { 5951, 247, 4 } /* netscaler_ica_device_serial_no */ + , { 5951, 248, 16 } /* netscaler_msi_client_cookie */ + , { 5951, 249, 8 } /* netscaler_ica_flags */ + , { 5951, 240, 2 } /* netscaler_ica_application_termination_type */ + , { 5951, 245, 4 } /* netscaler_ica_app_process_id */ + , { 5951, 241, 4 } /* netscaler_ica_application_termination_time */ + } + } + , { 276 + , 12 + , { { 0, 138, 4 } /* observationPointId */ + , { 0, 144, 4 } /* exportingProcessId */ + , { 0, 148, 8 } /* flowId */ + , { 5951, 133, 4 } /* netscaler_connection_id */ + , { 0, 60, 1 } /* ipVersion */ + , { 0, 4, 1 } /* protocolIdentifier */ + , { 0, 210, 2 } /* paddingOctets */ + , { 5951, 200, 16 } /* netscaler_ica_session_guid */ + , { 5951, 247, 4 } /* netscaler_ica_device_serial_no */ + , { 5951, 248, 16 } /* netscaler_msi_client_cookie */ + , { 5951, 249, 8 } /* netscaler_ica_flags */ + , { 5951, 242, 4 } /* netscaler_ica_session_end_time */ + } + } + , { 277 + , 5 + , { { 0, 138, 4 } /* observationPointId */ + , { 0, 144, 4 } /* exportingProcessId */ + , { 5951, 251, 1 } /* netscaler_license_type */ + , { 5951, 252, 8 } /* netscaler_max_license_count */ + , { 5951, 253, 8 } /* netscaler_current_license_consumed */ + } + } + , { 278 + , 24 + , { { 0, 138, 4 } /* 
observationPointId */ + , { 0, 144, 4 } /* exportingProcessId */ + , { 0, 148, 8 } /* flowId */ + , { 5951, 129, 4 } /* netscaler_transaction_id */ + , { 5951, 133, 4 } /* netscaler_connection_id */ + , { 0, 60, 1 } /* ipVersion */ + , { 0, 4, 1 } /* protocolIdentifier */ + , { 0, 210, 2 } /* paddingOctets */ + , { 0, 8, 4 } /* sourceIPv4Address */ + , { 0, 12, 4 } /* destinationIPv4Address */ + , { 0, 7, 2 } /* sourceTransportPort */ + , { 0, 11, 2 } /* destinationTransportPort */ + , { 0, 2, 8 } /* packetDeltaCount */ + , { 0, 1, 8 } /* octetDeltaCount */ + , { 0, 6, 1 } /* tcpControlBits */ + , { 5951, 132, 8 } /* netscaler_flow_flags */ + , { 0, 154, 8 } /* flowStartMicroseconds */ + , { 0, 155, 8 } /* flowEndMicroseconds */ + , { 0, 10, 4 } /* ingressInterface */ + , { 0, 14, 4 } /* egressInterface */ + , { 5951, 151, 4 } /* netscaler_app_name_app_id */ + , { 5951, 192, 16 } /* netscaler_connection_chain_id */ + , { 5951, 193, 1 } /* netscaler_connection_chain_hop_count */ + , { 5951, 205, 65535 } /* netscaler_aaa_username */ + } + } + , { 279 + , 24 + , { { 0, 138, 4 } /* observationPointId */ + , { 0, 144, 4 } /* exportingProcessId */ + , { 0, 148, 8 } /* flowId */ + , { 5951, 129, 4 } /* netscaler_transaction_id */ + , { 5951, 133, 4 } /* netscaler_connection_id */ + , { 0, 60, 1 } /* ipVersion */ + , { 0, 4, 1 } /* protocolIdentifier */ + , { 0, 210, 2 } /* paddingOctets */ + , { 0, 27, 16 } /* sourceIPv6Address */ + , { 0, 28, 16 } /* destinationIPv6Address */ + , { 0, 7, 2 } /* sourceTransportPort */ + , { 0, 11, 2 } /* destinationTransportPort */ + , { 0, 2, 8 } /* packetDeltaCount */ + , { 0, 1, 8 } /* octetDeltaCount */ + , { 0, 6, 1 } /* tcpControlBits */ + , { 5951, 132, 8 } /* netscaler_flow_flags */ + , { 0, 154, 8 } /* flowStartMicroseconds */ + , { 0, 155, 8 } /* flowEndMicroseconds */ + , { 0, 10, 4 } /* ingressInterface */ + , { 0, 14, 4 } /* egressInterface */ + , { 5951, 151, 4 } /* netscaler_app_name_app_id */ + , { 5951, 192, 16 } /* netscaler_connection_chain_id */ + , { 5951, 193, 1 } /* netscaler_connection_chain_hop_count */ + , { 5951, 205, 65535 } /* netscaler_aaa_username */ + } + } + , { 280 + , 24 + , { { 0, 138, 4 } /* observationPointId */ + , { 0, 144, 4 } /* exportingProcessId */ + , { 5951, 129, 4 } /* netscaler_transaction_id */ + , { 5951, 132, 8 } /* netscaler_flow_flags */ + } + } + , { 281 + , 40 + , { { 0, 138, 4 } /* observationPointId */ + , { 0, 144, 4 } /* exportingProcessId */ + , { 0, 148, 8 } /* flowId */ + , { 5951, 129, 4 } /* netscaler_transaction_id */ + , { 5951, 133, 4 } /* netscaler_connection_id */ + , { 0, 60, 1 } /* ipVersion */ + , { 0, 4, 1 } /* protocolIdentifier */ + , { 0, 210, 2 } /* paddingOctets */ + , { 0, 8, 4 } /* sourceIPv4Address */ + , { 0, 12, 4 } /* destinationIPv4Address */ + , { 0, 7, 2 } /* sourceTransportPort */ + , { 0, 11, 2 } /* destinationTransportPort */ + , { 0, 2, 8 } /* packetDeltaCount */ + , { 0, 1, 8 } /* octetDeltaCount */ + , { 0, 6, 1 } /* tcpControlBits */ + , { 5951, 132, 8 } /* netscaler_flow_flags */ + , { 0, 154, 8 } /* flowStartMicroseconds */ + , { 0, 155, 8 } /* flowEndMicroseconds */ + , { 5951, 128, 4 } /* netscaler_round_trip_time */ + , { 0, 14, 4 } /* egressInterface */ + , { 0, 10, 4 } /* ingressInterface */ + , { 5951, 151, 4 } /* netscaler_app_name_app_id */ + , { 5951, 192, 16 } /* netscaler_connection_chain_id */ + , { 5951, 193, 1 } /* netscaler_connection_chain_hop_count */ + , { 5951, 144, 2 } /* netscaler_http_rsp_status */ + , { 5951, 145, 8 } /* netscaler_http_rsp_len */ 
+ , { 5951, 161, 4 } /* netscaler_main_page_id */ + , { 5951, 162, 4 } /* netscaler_main_page_coreid */ + , { 5951, 153, 8 } /* netscaler_http_req_rcv_fb */ + , { 5951, 156, 8 } /* netscaler_http_req_forw_fb */ + , { 5951, 157, 8 } /* netscaler_http_res_rcv_fb */ + , { 5951, 159, 8 } /* netscaler_http_req_rcv_lb */ + , { 5951, 160, 8 } /* netscaler_http_req_forw_lb */ + , { 5951, 169, 8 } /* netscaler_http_res_rcv_lb */ + , { 5951, 182, 4 } /* netscaler_client_rtt */ + , { 5951, 205, 65535 } /* netscaler_aaa_username */ + , { 5951, 183, 65535 } /* netscaler_http_content_type */ + , { 5951, 187, 65535 } /* netscaler_http_res_location */ + , { 5951, 188, 65535 } /* netscaler_http_res_set_cookie */ + , { 5951, 189, 65535 } /* netscaler_http_res_set_cookie2 */ + } + } + , { 282 + , 46 + , { { 0, 138, 4 } /* observationPointId */ + , { 0, 144, 4 } /* exportingProcessId */ + , { 5951, 129, 4 } /* netscaler_transaction_id */ + , { 5951, 270, 4 } /* 5951_270 */ + , { 5951, 271, 4 } /* 5951_271 */ + , { 5951, 272, 4 } /* 5951_272 */ + , { 5951, 273, 4 } /* 5951_273 */ + , { 5951, 274, 4 } /* 5951_274 */ + , { 5951, 275, 4 } /* 5951_275 */ + , { 5951, 276, 4 } /* 5951_276 */ + , { 5951, 277, 4 } /* 5951_277 */ + , { 5951, 278, 4 } /* 5951_278 */ + , { 5951, 279, 4 } /* 5951_279 */ + , { 5951, 280, 4 } /* 5951_280 */ + , { 5951, 281, 4 } /* 5951_281 */ + , { 5951, 282, 4 } /* 5951_282 */ + , { 5951, 283, 4 } /* 5951_283 */ + , { 5951, 284, 4 } /* 5951_284 */ + , { 5951, 285, 4 } /* 5951_285 */ + , { 5951, 286, 4 } /* 5951_286 */ + , { 5951, 287, 4 } /* 5951_287 */ + , { 5951, 288, 4 } /* 5951_288 */ + , { 5951, 289, 4 } /* 5951_289 */ + , { 5951, 290, 4 } /* 5951_290 */ + , { 5951, 291, 4 } /* 5951_291 */ + , { 5951, 292, 4 } /* 5951_292 */ + , { 5951, 293, 4 } /* 5951_293 */ + , { 5951, 294, 4 } /* 5951_294 */ + , { 5951, 295, 4 } /* 5951_295 */ + , { 5951, 296, 4 } /* 5951_296 */ + , { 5951, 297, 4 } /* 5951_297 */ + , { 5951, 298, 4 } /* 5951_298 */ + , { 5951, 299, 4 } /* 5951_299 */ + , { 5951, 300, 4 } /* 5951_300 */ + , { 5951, 301, 4 } /* 5951_301 */ + , { 5951, 302, 4 } /* 5951_302 */ + , { 5951, 303, 4 } /* 5951_303 */ + , { 5951, 304, 4 } /* 5951_304 */ + , { 5951, 305, 4 } /* 5951_305 */ + , { 5951, 306, 4 } /* 5951_306 */ + , { 5951, 307, 4 } /* 5951_307 */ + , { 5951, 308, 4 } /* 5951_308 */ + , { 5951, 309, 4 } /* 5951_309 */ + , { 5951, 310, 4 } /* 5951_310 */ + , { 5951, 311, 4 } /* 5951_311 */ + , { 5951, 312, 4 } /* 5951_312 */ + } + } + , { 283 + , 7 + , { { 0, 138, 4 } /* observationPointId */ + , { 0, 144, 4 } /* exportingProcessId */ + , { 5951, 129, 4 } /* netscaler_transaction_id */ + , { 5951, 315, 4 } /* 5951_315 */ + , { 5951, 313, 4 } /* 5951_313 */ + , { 5951, 314, 4 } /* 5951_314 */ + , { 5951, 316, 65535 } /* 5951_316 */ + } + } + , { 284 + , 1 + , { { 5951, 319, 8 } /* 5951_319 */ + } + } + , { 285 + , 3 + , { { 0, 138, 4 } /* observationPointId */ + , { 5951, 268, 4 } /* netscaler_cache_redir_client_connection_core_id */ + , { 5951, 269, 4 } /* netscaler_cache_redir_client_connection_transaction_id */ + } + } + , { 286 + , 5 + , { { 0, 138, 4 } /* observationPointId */ + , { 0, 144, 4 } /* exportingProcessId */ + , { 5951, 129, 4 } /* netscaler_transaction_id */ + , { 5951, 317, 4 } /* 5951_317 */ + , { 5951, 318, 4 } /* 5951_318 */ + } + } + }; + diff --git a/lib/ipfix_fallback_templates_netscaler.h b/lib/ipfix_fallback_templates_netscaler.h new file mode 100644 index 0000000..efba644 --- /dev/null +++ b/lib/ipfix_fallback_templates_netscaler.h @@ -0,0 +1,9 @@ 
+#ifndef __IPFIX_FALLBACK_TEMPLATES_NETSCALER_H +# define __IPFIX_FALLBACK_TEMPLATES_NETSCALER_H + +#include "ipfix_col.h" + +extern int netscaler_fallback_template_count; +extern fallback_template_t netscaler_fallback_templates[]; + +#endif From dbf2c33ee4989c4dcd9379c25e7eee4e9318afae Mon Sep 17 00:00:00 2001 From: Cameron Kerr Date: Thu, 26 Mar 2015 15:04:56 +1300 Subject: [PATCH 40/48] Started on some release documentation --- README | 23 ------- README.md | 181 ++++++++++++++++++++++++++++++++++++++++++++++++++++++ 2 files changed, 181 insertions(+), 23 deletions(-) delete mode 100644 README create mode 100644 README.md diff --git a/README b/README deleted file mode 100644 index 2cfc66c..0000000 --- a/README +++ /dev/null @@ -1,23 +0,0 @@ - -libipfix 1.0 - -This package contains source code for a library to export and -collect IPFIX measurement and accounting data. -The library is complemented with an IPFIX collector and -a basic IPFIX probe. - -The library supports IPFIX (draft-ietf-ipfix-protocol-24.txt, -draft-ietf-ipfix-info-15.txt, draft-ietf-psamp-info-05.txt) -and Netflow9 (RFC 3954) using TCP, UDP and SCTP as transport protocol. - -There are some small example programs containing code that -demonstrates how to use the library. - -Find more information at -http://libipfix.sourceforge.net/ - -The latest release is available from -http://sourceforge.net/projects/libipfix/ - -Please send inquiries/comments/reports to Carsten Schmoll - diff --git a/README.md b/README.md new file mode 100644 index 0000000..4682a77 --- /dev/null +++ b/README.md @@ -0,0 +1,181 @@ + +libipfix (forked version of 1.0 with many patches to make json-milestone-2 or so) + +This package contains source code for a library to export and +collect IPFIX measurement and accounting data. +The library is complemented with an IPFIX collector and +a basic IPFIX probe. + +The library supports IPFIX (draft-ietf-ipfix-protocol-24.txt, +draft-ietf-ipfix-info-15.txt, draft-ietf-psamp-info-05.txt) +and Netflow9 (RFC 3954) using TCP, UDP and SCTP as transport protocol. + +There are some small example programs containing code that +demonstrates how to use the library. + +Find more information at +http://libipfix.sourceforge.net/ (upstream; will not relate to this fork) + +The latest release is available from +http://sourceforge.net/projects/libipfix/ (upstream) +https://github.com/cameronkerrnz/libipfix (this fork) + +Please send inquiries/comments/reports about this fork to Cameron Kerr + +Compiling (for JSONlines output) +=== + +As for Red Hat Enterprise Linux 6. I hope to make an RPM for the next release. + +~~~ +sudo yum install "@Development Tools" libpcap-devel +git clone https://cameronkerrnz@github.com/cameronkerrnz/libipfix.git +cd libipfix +./configure --prefix=/opt/libipfix --enable-jsonlines --enable-fallback-templates +make +make # yes, a second time to work around some faulty Makefile rules (patch welcome!) +sudo make install +~~~ + +Running the software initially +=== + +Because the transcript above had the libraries installed in a non-standard place, we can set LD_LIBRARY_PATH, or add the path to /etc/ld.so.conf.d/libipfix, or ... I'm sure there's a more clever way of specifying this with the linker... 
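+
+For example, a minimal sketch of the ld.so.conf.d approach (assuming the /opt/libipfix prefix used above; ldconfig only reads files in that directory whose names end in .conf):
+
+~~~
+echo /opt/libipfix/lib | sudo tee /etc/ld.so.conf.d/libipfix.conf
+sudo ldconfig
+~~~
+
+The transcripts below just set LD_LIBRARY_PATH inline.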
+
+~~~
+$ LD_LIBRARY_PATH=/opt/libipfix/lib /opt/libipfix/bin/ipfix_collector --help
+
+ipfix collector ($Revision: 1.12 $ Mar 26 2015)
+
+usage: ipfix_collector [options]
+
+options:
+  -h  this help
+  -4  accept connections via AF_INET socket
+  -6  accept connections via AF_INET6 socket
+  -o  store files of collected data in this dir
+  -p  listen on this port (default=4739)
+  -s  support SCTP clients
+  -t  support TCP clients
+  -u  support UDP clients
+  -v  increase verbose level
+jsonlines options:
+  --json  export JSON to a file; one JSON doc/line
+  --jsonfile  file to append to, or '-' for stdout
+  --json-record-unknown-sets  include bytes of sets dropped due to no template
+fallback templates:
+  --fallback-templates=netscaler
+
+example: ipfix_collector -stu -vv -o .
+~~~
+
+Let's test that we can receive our IPFIX / AppFlow messages. Ensure you have configured your appliance to send to the port that you will listen on (the standard IPFIX port is UDP/4739 -- IPFIX is also specified over TCP and SCTP -- I have only tested / developed with UDP currently, because that is all that the Netscalers offer).
+
+It is useful to bear in mind that this traffic is both unencrypted and unauthenticated, other than by whatever network-layer restrictions you provide.
+
+We'll write the output to the file /tmp/data.json.
+
+~~~
+$ LD_LIBRARY_PATH=/opt/libipfix/lib /opt/libipfix/bin/ipfix_collector -4 -u -vv --json --jsonfile /tmp/data.json --fallback-templates=netscaler
+[ipfix_collector] listen on port 4739, write to stdout ...
+[ipfix_collector] data goes to file /tmp/data.json as one JSON document per line
+... you should soon see DATA RECORDS and TEMPLATE RECORDS flow up your screen with -vv
+...
+Template source is fallback for template ID 258
+DATA RECORD:
+ template id: 258
+ nfields: 37
+ observationPointId: 6914017
+ exportingProcessId: 0
+ flowId: 27059599
+ netscaler_transaction_id: 2958993
+ netscaler_connection_id: 27059599
+ ipVersion: 4
+ protocolIdentifier: 6
+ paddingOctets: 0x0000
+ sourceIPv4Address: «IP»
+ destinationIPv4Address: «IP»
+ sourceTransportPort: 50473
+ destinationTransportPort: 443
+ packetDeltaCount: 1
+ octetDeltaCount: 682
+ tcpControlBits: 24
+ netscaler_flow_flags: 84025344
+ flowStartMicroseconds: 15617890647616118639
+ flowEndMicroseconds: 15617890647616118639
+ ingressInterface: 2
+ egressInterface: 2147483651
+ netscaler_app_name_app_id: 9541
+ netscaler_app_unit_name_app_id: 0
+ netscaler_http_res_forw_fb: 0
+ netscaler_http_res_forw_lb: 0
+ netscaler_connection_chain_id: 0x00000000000000000000000000000000
+ netscaler_connection_chain_hop_count: 0
+ netscaler_http_req_url: «Request URL»
+ netscaler_http_req_cookie: «Cookies sent»
+ netscaler_http_req_referer: «HTTP referrer»
+ netscaler_http_req_method: GET
+ netscaler_http_req_host: «HTTP host header»
+ netscaler_http_req_user_agent: «HTTP user-agent string»
+ netscaler_http_content_type:
+ netscaler_http_req_authorization:
+ netscaler_http_req_via:
+ netscaler_http_req_x_forwarded_for:
+ netscaler_http_domain_name:
+...
+^C
+[ipfix_collector] got signo 2, bye.
+~~~
+
+I've anonymised various fields.
+
+Now have a look at /tmp/data.json. Because its all one long line, I'll reformat it to show one line on multiple lines, using Python's pretty-printer.
+ +~~~ +$ tail -1 /tmp/data.json | python -mjson.tool +{ + "destinationIPv4Address": "«IP»", + "destinationTransportPort": 443, + "egressInterface": 2147483651, + "exportingProcessId": 0, + "flowEndMicroseconds": "2015-03-26T01:52:12.000Z", + "flowId": 27059599, + "flowStartMicroseconds": "2015-03-26T01:52:12.000Z", + "ingressInterface": 2, + "ipVersion": 4, + "ipfix_exporter_ip": "«IP»", + "ipfix_template_id": "258", + "ipfix_template_source": "fallback", + "ipfix_timestamp": "2015-03-26T01:52:12Z", + "netscaler_app_name_app_id": 9541, + "netscaler_app_unit_name_app_id": 0, + "netscaler_connection_chain_hop_count": 0, + "netscaler_connection_chain_id": "00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00", + "netscaler_connection_id": 27059599, + "netscaler_flow_flags": 84025344, + "netscaler_http_req_cookie": "«Cookies»", + "netscaler_http_req_host": "«Host»", + "netscaler_http_req_method": "GET", + "netscaler_http_req_referer": "«Referrer»", + "netscaler_http_req_url": "«Request URI»", + "netscaler_http_req_user_agent": "«User agent»", + "netscaler_http_res_forw_fb": "2036-02-07T06:28:16.000Z", + "netscaler_http_res_forw_lb": "2036-02-07T06:28:16.000Z", + "netscaler_transaction_id": 2958993, + "observationPointId": 6914017, + "octetDeltaCount": 682, + "packetDeltaCount": 1, + "protocolIdentifier": 6, + "sourceIPv4Address": "«IP»", + "sourceTransportPort": 50473, + "tcpControlBits": 24 +} +~~~ + +At this point, its useful to remember that every LINE is a separate JSON document. But the FILE is *NOT* a valid JSON data-structure, so you can't process the file (or more than one line of the file) using a tool that expects JSON (unless it can handle JSONlines). + +~~~ +$ tail -2 /tmp/data.json | python -mjson.tool +Extra data: line 2 column 1 - line 3 column 1 (char 911 - 2430) +~~~ + From c9884c25e20f1b2a6e2fe481d9775a766e1ea0d9 Mon Sep 17 00:00:00 2001 From: Cameron Kerr Date: Thu, 26 Mar 2015 17:13:23 +1300 Subject: [PATCH 41/48] Continued on some release documentation --- README.md | 370 +++++++++++++++++++++++++++++++++++++++++++++++++++++- 1 file changed, 365 insertions(+), 5 deletions(-) diff --git a/README.md b/README.md index 4682a77..6aa233e 100644 --- a/README.md +++ b/README.md @@ -127,9 +127,9 @@ DATA RECORD: [ipfix_collector] got signo 2, bye. ~~~ -I've anonymised various fields. - -Now have a look at /tmp/data.json. Because its all one long line, I'll reformat it to show one line on multiple lines, using Python's pretty-printer. +I've anonymised various fields. Now have a look at /tmp/data.json. Because its +all one long line, I'll reformat it to show one line on multiple lines, using +Python's pretty-printer. ~~~ $ tail -1 /tmp/data.json | python -mjson.tool @@ -172,10 +172,370 @@ $ tail -1 /tmp/data.json | python -mjson.tool } ~~~ -At this point, its useful to remember that every LINE is a separate JSON document. But the FILE is *NOT* a valid JSON data-structure, so you can't process the file (or more than one line of the file) using a tool that expects JSON (unless it can handle JSONlines). +At this point, its useful to remember that every LINE is a separate JSON +document. But the FILE is **NOT** a valid JSON data-structure, so you can't +process the file (or more than one line of the file) using a tool that expects +JSON (unless it can handle JSONlines). + +Note: did you know that proper JSON says to escape the '/' character? That may +come as something of a surprise to you. 
You'll also perhaps notice that the +NetScaler logs already excape the URL according to the Common Logging Format +(CLF) convention. I shall perhaps look at decoding them and encoding them as +UTF-8, but that is not a priority. ~~~ $ tail -2 /tmp/data.json | python -mjson.tool Extra data: line 2 column 1 - line 3 column 1 (char 911 - 2430) ~~~ - + +Running as a daemon +=== + +FIXME: this feature coming soon + + +Add a service account +=== + +I'll create a local user that this software will run as. That user will only +need access to write to the log file. I suggest you set the group permissions +for whatever will be reading the logs (eg. nxlog, as shown later). Naturally, +change the path to suit your needs. + +~~~ +sudo /usr/sbin/useradd --system --user-group ipfix +sudo install --directory --owner ipfix --group nxlog --mode 0750 /logs/current/ipfix/ +~~~ + +Let's see how to run it by hand. Since it doesn't run as a daemon yet, I could run it using something like 'nohup', and redirect its (overly verbose) stdout/stderr to /dev/null, but for now, I'd prefer to run it inside of a 'screen' session. + +~~~ +screen -e^Bb -S ipfix +^BA (set window's title to) ipfix collector +sudo su - ipfix +export LD_LIBRARY_PATH=/opt/libipfix/lib +/opt/libipfix/bin/ipfix_collector -4 -u --json --jsonfile /logs/current/ipfix/data.json --fallback-templates=netscaler +^Bd (detaches from screen session) +~~~ + +This ends up creating a process-tree like the following: + +~~~ +$ pstree +init─┬─... + ... + ├─screen───bash───sudo───su───bash───ipfix_collector + ... +~~~ + +(Proper daemonisation will come, don't worry). + + +Log files must be rotated +=== + +So now we have the process running, and logging data. Logging data needs to be +rotated, so let's do that now before we forget and cause a problem later on. +Assuming that you're running logrotate, creating a log rotation policy is fairly +easy. Note that because we don't record a PID, as a proper daemon will, it may not +work if there are multiple such processes found. + +~~~ +# cat /etc/logrotate.d/ipfix +/logs/current/ipfix/data.json { + nodateext + rotate 3 + daily + compress + delaycompress + postrotate + skill -HUP -u ipfix -c ipfix_collector + endscript +} +~~~ + +Note that I've specified a rather short rotation lifetime, because I'm passing +all this to nxlog, and nxlog will be looking after retention. Alter to suit +your environment and needs. + +Force a rotation and check that a new file has opened. I like to make my files +in /etc/logrotate.d/ fairly standalone so I can force a rotation on a +particular policy. + +~~~ +logrotate -f /etc/logrotate.d/ipfix +~~~ + + +Do something with the data +=== + +Where you put the file and what you do with it will depend on your use-case. I +will show you how you can use nxlog to tail the file, add some extra +information, and send it on to something like Logstash and Elasticsearch, where +you can then view it with Kibana. + +Read the data with nxlog and forward it to logstash +=== + +Note that logstash can tail a file (I believe), but I prefer to have the data +go into nxlog, because I set nxlog the task of managing data retention, and it +will add some extra data which will help me use the data inside of the rest of +my logging system. Nothing about this program requires (or even knows about) +nxlog, or ELK. Its only assumption is that you can tail a file where each line +is a JSON document. 
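+
+As a quick sanity check, independent of nxlog, any JSONlines-aware tool can consume the file directly. For example, a sketch assuming jq is installed (e.g. from EPEL); jq treats its input as a stream of JSON documents, one per line:
+
+~~~
+# count the records collected so far, per template ID
+jq -r '.ipfix_template_id' /logs/current/ipfix/data.json | sort | uniq -c
+~~~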
+
+Here is about the simplest config for nxlog that will read the file, add some
+extra data (for illustration), and send it to logstash.
+
+~~~
+# parse_json() and to_json() are provided by nxlog's xm_json extension module
+<Extension json>
+    Module xm_json
+</Extension>
+
+<Input in_ipfix_netscalers>
+    Module im_file
+    File "/logs/current/ipfix/data.json"
+    SavePos TRUE
+    ReadFromLast TRUE
+    InputType LineBased
+    Exec parse_json(); \
+         $SITE_application_stack = "some_group_of_netscalers"; \
+         $SITE_log_type = "ipfix_appflow"; \
+         $SITE_environment = "dev"; \
+         $EventTime = parsedate($ipfix_timestamp);
+</Input>
+
+# IMPORTANT
+# =========
+#
+# When receiving input as JSON, and then modifying it, beware that the default
+# presentation of the output is the same as the input. So, if you add
+# SITE_application_stack etc. to the incoming object, and then proceed to write
+# it out without having first had to_json() applied to it, you will not get the
+# SITE_application_stack attribute added to the outgoing JSON document; this
+# messes up the message routing. So remember to apply to_json() in each output
+# that is outputting JSON (and similarly for any other such output).
+#
+<Output out_logstash>
+    Module om_tcp
+    Host mylogstash.example.com
+    Port 5140
+    Exec to_json();
+</Output>
+
+<Route ipfix_to_logstash>
+    Path in_ipfix_netscalers => out_logstash
+</Route>
+~~~
+
+From nxlog, you may not need to do anything further, but if you like to process
+things further in the likes of logstash (eg. putting different major systems in
+different sets of indexes inside Elasticsearch), then you may need something
+like the following snippets (treat these as inspiration). Before we go further,
+just check your nxlog logging in case of an error. You could even verify that it
+is sending data to logstash with tcpdump (assuming that tcpdump is up at the time).
+
+~~~
+tcpdump -q -p -nn -s0 -i lo -A tcp and port 5140 | grep netscalers
+~~~
+
+Here's the start of a suitable logstash configuration.
+
+~~~
+input
+{
+  tcp
+  {
+    host => "0.0.0.0"
+    port => 5140
+    mode => "server"
+    codec => "json_lines"
+  }
+}
+
+filter
+{
+  # We create a different index for each day, which makes removing old data
+  # fairly easy. It also means that we can optimise old indexes (which we
+  # shouldn't need to do unless we've deleted documents from an index), or
+  # reduce the number of replicas for old data, or change where an index is
+  # stored.
+  #
+  # Some application stacks are very heavy in terms of log volume. To
+  # give us more flexibility in how we handle those indexes (such as
+  # removing or reducing replica count earlier than we would otherwise),
+  # we can put them into different indexes on a case-by-case basis, and
+  # the rest will go into a common index.
+  #
+  # Note that the variable name must be lowercased in the template name
+  # (and ONLY in the template name); I think it is interpreted by
+  # Elasticsearch, not by LogStash, and ES seems to want it lowercase.
+  #
+  # One symptom of the template not applying is that the .raw attributes,
+  # such as username.raw, aren't available.
+ # + if [SITE_application_stack] in ["bigone", "megaone", "netscalers"] + { + alter + { + add_field => + { + "site_index_basename" => "%{SITE_application_stack}" + } + } + } + else + { + alter + { + add_field => + { + "site_index_basename" => "logstash" + } + } + } + + date + { + match => ["EventTime", "YYYY-MM-dd HH:mm:ss"] + } +} + +output +{ + # Kibana 4 (up to at least beta 4) requires all nodes to be ES version 1.4.0+, + # as it doesn't know (although the data is there) how to differentiate a + # client node + # + # Doc: http://logstash.net/docs/1.4.2/outputs/elasticsearch_http + # + elasticsearch_http + { + host => "127.0.0.1" + template_name => "%{site_index_basename}" + index => "%{site_index_basename}-%{+YYYY.MM.dd}" + } +} +~~~ + +There are other common things you could do, such as geoip lookups and user-agent breakdown, but that's well outside the scope of this document. + +If you are sorting things into different groups of indexes, then you may need to do something with your templates in Elasticsearch. Access the REST interface (I suggest using the Koph plugin -- use whatever you are comfortable with) and get the template for 'logstash'. + +~~~ +# curl -XGET localhost:9200/_template/logstash?pretty +{ + "logstash" : { + "order" : 0, + "template" : "logstash-*", + "settings" : { + "index.refresh_interval" : "30s", + "index.number_of_replicas" : "1" + }, + "mappings" : { + "_default_" : { + "dynamic_templates" : [ { + "string_fields" : { + "mapping" : { + "index" : "analyzed", + "omit_norms" : true, + "type" : "string", + "fields" : { + "raw" : { + "ignore_above" : 256, + "index" : "not_analyzed", + "type" : "string" + } + } + }, + "match_mapping_type" : "string", + "match" : "*" + } + } ], + "properties" : { + "geoip" : { + "path" : "full", + "dynamic" : true, + "type" : "object", + "properties" : { + "location" : { + "type" : "geo_point" + } + } + }, + "@version" : { + "index" : "not_analyzed", + "type" : "string" + } + }, + "_all" : { + "enabled" : true + } + } + }, + "aliases" : { } + } +} +~~~ + +I increase the refresh interval to about 30s for larger things (this is more efficient). Change the bit where it says "logstash-*" to be "netscalers-*", and removing the outer layering as shown, PUT the new template. + +~~~ +curl -XPUT localhost:9200/_template/netscalers -d ' +{ + "template" : "netscalers-*", + "settings" : { + "index.refresh_interval" : "30s", + "index.number_of_replicas" : "1" + }, + "mappings" : { + "_default_" : { + "dynamic_templates" : [ { + "string_fields" : { + "mapping" : { + "index" : "analyzed", + "omit_norms" : true, + "type" : "string", + "fields" : { + "raw" : { + "ignore_above" : 256, + "index" : "not_analyzed", + "type" : "string" + } + } + }, + "match_mapping_type" : "string", + "match" : "*" + } + } ], + "properties" : { + "geoip" : { + "path" : "full", + "dynamic" : true, + "type" : "object", + "properties" : { + "location" : { + "type" : "geo_point" + } + } + }, + "@version" : { + "index" : "not_analyzed", + "type" : "string" + } + }, + "_all" : { + "enabled" : true + } + } + }, + "aliases" : { } +} +' +~~~ + +NOTE: I have not attempted to optimise the mapping that this template would produce. I know there is plenty of work in that area that could be done. 
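+
+As one sketch of the kind of tuning that could be done (the template name and the field choices below are purely illustrative): a second template with a higher order can override the dynamic string mapping for fields that you only ever match exactly, so they are not indexed twice (once analysed, once as .raw).
+
+~~~
+curl -XPUT localhost:9200/_template/netscalers-tuning -d '
+{
+  "template" : "netscalers-*",
+  "order" : 1,
+  "mappings" : {
+    "_default_" : {
+      "properties" : {
+        "ipfix_template_id" : { "type" : "string", "index" : "not_analyzed" },
+        "netscaler_http_req_user_agent" : { "type" : "string", "index" : "not_analyzed" }
+      }
+    }
+  }
+}
+'
+~~~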
+ +Make sure you get the following output + +~~~ +{"acknowledged":true} +~~~ + From 12a2b93c968b285486d6c255ce2ee5c635cd7141 Mon Sep 17 00:00:00 2001 From: Cameron Kerr Date: Thu, 26 Mar 2015 21:16:49 +1300 Subject: [PATCH 42/48] Added a Kibana3 dashboard (will move to Kibana 4 soon) --- .../kibana3/Netscaler.kibana3-dashboard.json | 468 ++++++++++++++++++ 1 file changed, 468 insertions(+) create mode 100644 doc/jsonlines/kibana3/Netscaler.kibana3-dashboard.json diff --git a/doc/jsonlines/kibana3/Netscaler.kibana3-dashboard.json b/doc/jsonlines/kibana3/Netscaler.kibana3-dashboard.json new file mode 100644 index 0000000..13048a3 --- /dev/null +++ b/doc/jsonlines/kibana3/Netscaler.kibana3-dashboard.json @@ -0,0 +1,468 @@ +{ + "title": "Netscaler Messages", + "services": { + "query": { + "list": { + "0": { + "id": 0, + "type": "topN", + "query": "*", + "alias": "breakdown by exporter", + "color": "#0A437C", + "pin": false, + "enable": true, + "field": "ipfix_exporter_ip.raw", + "size": 10, + "union": "AND" + }, + "1": { + "id": 1, + "color": "#EAB839", + "alias": "exporters seen", + "pin": false, + "type": "lucene", + "enable": true, + "query": "ipfix_collector_notice:\"newsource\"" + }, + "2": { + "id": 2, + "color": "#6ED0E0", + "alias": "missing template reports", + "pin": false, + "type": "lucene", + "enable": true, + "query": "ipfix_collector_notice:\"no_template_for_set\"" + }, + "3": { + "id": 3, + "color": "#EF843C", + "alias": "user mentions", + "pin": false, + "type": "lucene", + "enable": true, + "query": "exists:netscaler_aaa_username" + } + }, + "ids": [ + 0, + 1, + 2, + 3 + ] + }, + "filter": { + "list": { + "0": { + "type": "time", + "field": "@timestamp", + "from": "now-2d", + "to": "now", + "mandate": "must", + "active": true, + "alias": "", + "id": 0 + } + }, + "ids": [ + 0 + ] + } + }, + "rows": [ + { + "title": "Graph", + "height": "250px", + "editable": true, + "collapse": false, + "collapsable": true, + "panels": [ + { + "span": 12, + "editable": true, + "group": [ + "default" + ], + "type": "histogram", + "mode": "count", + "time_field": "@timestamp", + "value_field": null, + "auto_int": true, + "resolution": 100, + "interval": "30m", + "fill": 3, + "linewidth": 3, + "timezone": "browser", + "spyable": true, + "zoomlinks": true, + "bars": false, + "stack": false, + "points": false, + "lines": true, + "legend": true, + "x-axis": true, + "y-axis": true, + "percentage": false, + "interactive": true, + "queries": { + "mode": "selected", + "ids": [ + 0 + ] + }, + "title": "Events over time", + "intervals": [ + "auto", + "1s", + "1m", + "5m", + "10m", + "30m", + "1h", + "3h", + "12h", + "1d", + "1w", + "1M", + "1y" + ], + "options": true, + "tooltip": { + "value_type": "cumulative", + "query_as_alias": true + }, + "scale": 1, + "y_format": "short", + "grid": { + "max": null, + "min": 0 + }, + "annotate": { + "enable": true, + "query": "ipfix_collector_notice:*", + "size": 20, + "field": "summary", + "sort": [ + "_score", + "desc" + ] + }, + "pointradius": 3, + "show_query": true, + "legend_counts": true, + "zerofill": true, + "derivative": false + } + ], + "notice": false + }, + { + "title": "Breakdowns", + "height": "300px", + "editable": true, + "collapse": false, + "collapsable": true, + "panels": [ + { + "error": false, + "span": 2, + "editable": true, + "type": "terms", + "loadingEditor": false, + "field": "ipfix_template_id", + "exclude": [], + "missing": true, + "other": true, + "size": 10, + "order": "count", + "style": { + "font-size": "10pt" + }, + "donut": true, + 
"tilt": false, + "labels": true, + "arrangement": "horizontal", + "chart": "pie", + "counter_pos": "below", + "spyable": true, + "queries": { + "mode": "all", + "ids": [ + 0, + 1, + 2, + 3 + ] + }, + "tmode": "terms", + "tstat": "total", + "valuefield": "", + "title": "Template IDs" + }, + { + "error": false, + "span": 3, + "editable": true, + "type": "text", + "loadingEditor": false, + "mode": "markdown", + "content": "* 256 TCP IPv4 flow (?)\n* 257 TCP IPv4 flow with RTT (?)\n* **258 HTTP access log without response code. COMMON**\n* 259 TCP IPv6 flow (?)\n* 260 TCP IPv6 flow with RTT (?)\n* 261 HTTP IPv6 access without response code (?)\n* 262 HTTP IPv4 access with JS timings and response code (?)\n* 263 HTTP IPv6 access with JS timings and response code (?)\n* 264 Syslog Message (via AppFlow)\n* 265 App Name Mappings (?)\n* 266 HTTP user interaction timings (?)\n* 267 HTTP client render timings (?)\n* 268 ... no information ...\n* 269 Database IPv4 ... (?)\n* 270 Database IPv4 ... (?)\n* 271 ICA IPv4 ... (?)\n* 272 ICA timing report (?)\n* 273 ICA channel utilisation (?)\n* 274 ICA application launch (?)\n* 275 ICA application termination (?)\n* 276 ICA session end (?)\n* 277 NetScaler licence utilisation\n* 278 **IPv4 application & username mapping. USEFUL**\n* 279 IPv6 application & username mapping (?)\n* 280 NetScaler transaction & flow flags (?)\n* 281 HTTP IPv4 ... +RTT, +AppName, -Req, +User, +NSTimings (?)\n* 282 ... no information ...\n* 283 ... no information ...\n* 284 ... no information ...\n* 285 ... something about cache redirection ...\n* 286 ... no information ...", + "style": {}, + "title": "Template Mappings" + }, + { + "error": false, + "span": 2, + "editable": true, + "type": "terms", + "loadingEditor": false, + "field": "ipfix_exporter_ip.raw", + "exclude": [], + "missing": true, + "other": true, + "size": 20, + "order": "count", + "style": { + "font-size": "10pt" + }, + "donut": true, + "tilt": false, + "labels": true, + "arrangement": "horizontal", + "chart": "pie", + "counter_pos": "below", + "spyable": true, + "queries": { + "mode": "selected", + "ids": [ + 1 + ] + }, + "tmode": "terms", + "tstat": "total", + "valuefield": "", + "title": "Exporters (all)" + }, + { + "error": false, + "span": 3, + "editable": true, + "type": "terms", + "loadingEditor": false, + "field": "netscaler_aaa_username.raw", + "exclude": [ + "" + ], + "missing": false, + "other": true, + "size": 50, + "order": "count", + "style": { + "font-size": "10pt" + }, + "donut": true, + "tilt": false, + "labels": true, + "arrangement": "horizontal", + "chart": "bar", + "counter_pos": "below", + "spyable": true, + "queries": { + "mode": "selected", + "ids": [ + 0 + ] + }, + "tmode": "terms", + "tstat": "total", + "valuefield": "", + "title": "Top Users" + }, + { + "error": false, + "span": 2, + "editable": true, + "type": "terms", + "loadingEditor": false, + "field": "netscaler_http_req_host.raw", + "exclude": [], + "missing": false, + "other": true, + "size": 10, + "order": "count", + "style": { + "font-size": "10pt" + }, + "donut": true, + "tilt": false, + "labels": true, + "arrangement": "horizontal", + "chart": "bar", + "counter_pos": "below", + "spyable": true, + "queries": { + "mode": "selected", + "ids": [ + 0 + ] + }, + "tmode": "terms", + "tstat": "total", + "valuefield": "", + "title": "HTTP Hosts" + } + ], + "notice": false + }, + { + "title": "Events", + "height": "350px", + "editable": true, + "collapse": true, + "collapsable": true, + "panels": [ + { + "title": "All events", + 
"error": false, + "span": 12, + "editable": true, + "group": [ + "default" + ], + "type": "table", + "size": 100, + "pages": 5, + "offset": 0, + "sort": [ + "ipfix_exporter_ip", + "desc" + ], + "style": { + "font-size": "9pt" + }, + "overflow": "min-height", + "fields": [ + "@timestamp", + "ipfix_template_id", + "sourceIPv4Address", + "ipfix_exporter_ip", + "destinationIPv4Address", + "destinationTransportPort", + "netscaler_aaa_username", + "netscaler_app_name_app_id", + "netscaler_app_name" + ], + "localTime": true, + "timeField": "@timestamp", + "highlight": [], + "sortable": true, + "header": true, + "paging": true, + "spyable": true, + "queries": { + "mode": "all", + "ids": [ + 0, + 1, + 2, + 3 + ] + }, + "field_list": true, + "status": "Stable", + "trimFactor": 300, + "normTimes": true, + "all_fields": true + } + ], + "notice": false + } + ], + "editable": true, + "failover": false, + "index": { + "interval": "day", + "pattern": "[netscalers-]YYYY.MM.DD", + "default": "NO_TIME_FILTER_OR_INDEX_PATTERN_NOT_MATCHED", + "warm_fields": true + }, + "style": "dark", + "panel_hints": true, + "pulldowns": [ + { + "type": "query", + "collapse": false, + "notice": false, + "query": "*", + "pinned": true, + "history": [ + "exists:netscaler_aaa_username", + "ipfix_collector_notice:\"no_template_for_set\"", + "ipfix_collector_notice:\"newsource\"", + "*", + "netscaler_aaa_username:*", + "netscaler_aaa_user:*", + "ipfix_collector_notice:*", + "netscaler_app_name:*", + "1494" + ], + "remember": 10, + "enable": true + }, + { + "type": "filtering", + "collapse": false, + "notice": true, + "enable": true + } + ], + "nav": [ + { + "type": "timepicker", + "collapse": false, + "notice": false, + "status": "Stable", + "time_options": [ + "5m", + "15m", + "1h", + "6h", + "12h", + "24h", + "2d", + "7d", + "30d" + ], + "refresh_intervals": [ + "5s", + "10s", + "30s", + "1m", + "5m", + "15m", + "30m", + "1h", + "2h", + "1d" + ], + "timefield": "@timestamp", + "now": true, + "filter_id": 0, + "enable": true + } + ], + "loader": { + "save_gist": false, + "save_elasticsearch": true, + "save_local": true, + "save_default": true, + "save_temp": true, + "save_temp_ttl_enable": true, + "save_temp_ttl": "30d", + "load_gist": true, + "load_elasticsearch": true, + "load_elasticsearch_size": 20, + "load_local": true, + "hide": false + }, + "refresh": false +} From ecabe4c681a7e85164d8251685c39ccf7015d170 Mon Sep 17 00:00:00 2001 From: James Wheatley Date: Wed, 22 Apr 2015 14:17:41 +0100 Subject: [PATCH 43/48] Added ipfix_init_with_start_time function ipfix_init always uses the current time for the start time, which is not ideal and means that we can't control the exported uptime. The ipfix_init_with_start_time function takes a time_t to calculate uptime from. Signed-off-by: James Wheatley --- lib/ipfix.c | 13 ++++++++++++- lib/ipfix.h | 1 + 2 files changed, 13 insertions(+), 1 deletion(-) diff --git a/lib/ipfix.c b/lib/ipfix.c index 9818648..1f397ef 100644 --- a/lib/ipfix.c +++ b/lib/ipfix.c @@ -677,6 +677,17 @@ int ipfix_get_eno_ieid( char *field, int *eno, int *ieid ) * remarks: init module, read field type info. */ int ipfix_init( void ) +{ + time_t tstart = time(NULL); + return ipfix_init_with_start_time(tstart); +} + +/* + * name: ipfix_init_with_start_time() + * parameters: > tstart time_t from which to calculate system uptime + * remarks: init module, read field type info. 
+ */ +int ipfix_init_with_start_time( time_t tstart ) { /* check and store in global flag, * whether we are on a Small or BigEndian machine */ @@ -693,7 +704,7 @@ int ipfix_init( void ) return -1; } #endif - g_tstart = time(NULL); + g_tstart = tstart; signal( SIGPIPE, SIG_IGN ); g_lasttid = 255; diff --git a/lib/ipfix.h b/lib/ipfix.h index 88e0980..7f06884 100644 --- a/lib/ipfix.h +++ b/lib/ipfix.h @@ -243,6 +243,7 @@ extern int ipfix_snprint_ipaddr( char *str, size_t size, void *data, size_t len /** common funcs */ int ipfix_init( void ); +int ipfix_init_with_start_time( time_t tstart ); int ipfix_add_vendor_information_elements( ipfix_field_type_t *fields ); int ipfix_reload( void ); void ipfix_cleanup( void ); From ed128ad483a2b72890bc878858a946719270a561 Mon Sep 17 00:00:00 2001 From: cameronkerrnz Date: Thu, 30 Mar 2017 15:16:45 +1300 Subject: [PATCH 44/48] Remove my username from the checkout documentation --- README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/README.md b/README.md index 6aa233e..7bfb79a 100644 --- a/README.md +++ b/README.md @@ -29,7 +29,7 @@ As for Red Hat Enterprise Linux 6. I hope to make an RPM for the next release. ~~~ sudo yum install "@Development Tools" libpcap-devel -git clone https://cameronkerrnz@github.com/cameronkerrnz/libipfix.git +git clone https://github.com/cameronkerrnz/libipfix.git cd libipfix ./configure --prefix=/opt/libipfix --enable-jsonlines --enable-fallback-templates make From 7f9e8aeee97dc0966c30a9fa25560ac3ffd03d7f Mon Sep 17 00:00:00 2001 From: Cameron Kerr Date: Mon, 10 Apr 2017 15:38:32 +1200 Subject: [PATCH 45/48] DISCONTINUING IN FAVOUR OF LOGSTASH NETFLOW CODEC As my use-case is around getting data into ELK, I'm migrating my efforts to https://github.com/logstash-plugins/logstash-codec-netflow. All development on this fork of libipfix will hereby cease. --- README.md | 9 +++++++++ 1 file changed, 9 insertions(+) diff --git a/README.md b/README.md index 7bfb79a..5d84b0d 100644 --- a/README.md +++ b/README.md @@ -1,3 +1,12 @@ +DISCONTUING WORK ON THIS PROJECT +=== + +I'm going to look at using https://github.com/logstash-plugins/logstash-codec-netflow instead, which is at least more actively +maintained than I can resource for this particular project. + + + + libipfix (forked version of 1.0 with many patches to make json-milestone-2 or so) From 31a83c5cec25e7bf348f9e6fe3d2c5c9a97a219c Mon Sep 17 00:00:00 2001 From: Cameron Kerr Date: Sat, 18 Nov 2017 22:15:33 +1300 Subject: [PATCH 46/48] Resolve #31 --- lib/ipfix_fallback_templates_netscaler.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/lib/ipfix_fallback_templates_netscaler.c b/lib/ipfix_fallback_templates_netscaler.c index f4ee1b6..1e8a5e1 100644 --- a/lib/ipfix_fallback_templates_netscaler.c +++ b/lib/ipfix_fallback_templates_netscaler.c @@ -60,12 +60,13 @@ fallback_template_t netscaler_fallback_templates[] = } } , { 258 - , 37 /* FIXME some say 38 (with netscaler_aaa_username) and some (that usually need a fallback) imply 37 (without netscaler_aaa_username), so reducing to 37 */ + , 38 /* version specific? 
see "unknown_bytes" IE below */ , { { 0, 138, 4 } /* observationPointId */ , { 0, 144, 4 } /* exportingProcessId */ , { 0, 148, 8 } /* flowId */ , { 5951, 129, 4 } /* netscaler_transaction_id */ , { 5951, 133, 4 } /* netscaler_connection_id */ + , { 5951, 999, 2 } /* unknown_bytes */ , { 0, 60, 1 } /* ipVersion */ , { 0, 4, 1 } /* protocolIdentifier */ , { 0, 210, 2 } /* paddingOctets */ From 7da105c47b926147394a9596c836b4806e29d568 Mon Sep 17 00:00:00 2001 From: Cameron Kerr Date: Sat, 18 Nov 2017 22:25:44 +1300 Subject: [PATCH 47/48] Added supervisord example configuration --- doc/supervisord-example.ini | 17 +++++++++++++++++ 1 file changed, 17 insertions(+) create mode 100644 doc/supervisord-example.ini diff --git a/doc/supervisord-example.ini b/doc/supervisord-example.ini new file mode 100644 index 0000000..9b07f28 --- /dev/null +++ b/doc/supervisord-example.ini @@ -0,0 +1,17 @@ +# This is an example for running ipfix_collector as a supervisord job. +# You might put this in /etc/supervisord.d/ipfix-receiver.ini +# +[program:ipfix-receiver] +command=/opt/libipfix/bin/ipfix_collector -4 -u --json --jsonfile /logs/current/ipfix/data.json --fallback-templates=netscaler +environment=LD_LIBRARY_PATH="/opt/libipfix/lib" + +# Change as appropriate +user=ipfix +group=nxlog + +autostart=true +autorestart=true +startsecs=2 +startretries=10 + +# Note; it would be better for the JSON output to go to stdout, then we could use supervisord to collect the logs and rotate the file. From 8c1013eec5858f78add276f1ade4cd4c2122bfd9 Mon Sep 17 00:00:00 2001 From: Cameron Kerr Date: Sat, 18 Nov 2017 22:38:25 +1300 Subject: [PATCH 48/48] Update documentation with recent changes --- README.md | 22 ++++------------------ 1 file changed, 4 insertions(+), 18 deletions(-) diff --git a/README.md b/README.md index 5d84b0d..ef56c12 100644 --- a/README.md +++ b/README.md @@ -200,7 +200,9 @@ Extra data: line 2 column 1 - line 3 column 1 (char 911 - 2430) Running as a daemon === -FIXME: this feature coming soon +Run this under a system such as SystemD or Supervisord. I'm deploying this on RHEL6 with Supervisord installed from pip (and currently requires Python 2.7, which you can get from Red Hat's SCL channel. + +There is an example supervisord configuration in the doc/ directory. Add a service account @@ -216,29 +218,13 @@ sudo /usr/sbin/useradd --system --user-group ipfix sudo install --directory --owner ipfix --group nxlog --mode 0750 /logs/current/ipfix/ ~~~ -Let's see how to run it by hand. Since it doesn't run as a daemon yet, I could run it using something like 'nohup', and redirect its (overly verbose) stdout/stderr to /dev/null, but for now, I'd prefer to run it inside of a 'screen' session. +Let's see how to run it by hand. ~~~ -screen -e^Bb -S ipfix -^BA (set window's title to) ipfix collector sudo su - ipfix export LD_LIBRARY_PATH=/opt/libipfix/lib /opt/libipfix/bin/ipfix_collector -4 -u --json --jsonfile /logs/current/ipfix/data.json --fallback-templates=netscaler -^Bd (detaches from screen session) -~~~ - -This ends up creating a process-tree like the following: - ~~~ -$ pstree -init─┬─... - ... - ├─screen───bash───sudo───su───bash───ipfix_collector - ... -~~~ - -(Proper daemonisation will come, don't worry). - Log files must be rotated ===