Showing posts with label monitoring. Show all posts
Showing posts with label monitoring. Show all posts

Wednesday, July 9, 2008

Monitoring your servers with sysstat (sar)

There are things that are so helpful that you assume everyone already knows about them, but sometimes that's not the case. Here I'll talk about a little package that is so powerful and efficient that you won't want to work without it anymore...
Taken from the ubuntu man page:

DESCRIPTION
The sar command writes to standard output the contents of selected cumula-
tive activity counters in the operating system. The accounting system, based
on the values in the count and interval parameters, writes information the
specified number of times spaced at the specified intervals in seconds. If
the interval parameter is set to zero, the sar command displays the average
statistics for the time since the system was started. The default value for
the count parameter is 1. If its value is set to zero, then reports are gen-
erated continuously. The collected data can also be saved in the file spec-
ified by the -o filename flag, in addition to being displayed onto the
screen. If filename is omitted, sar uses the standard system activity daily
data file, the /var/log/sysstat/sadd file, where the dd parameter indicates
the current day. By default all the data available from the kernel are
saved in the data file. Exceptions are interrupts and disks data, for which
the relevant options must be explicitly passed to sar (or to its backend
sadc ) when the data file is created (see options below).


"sar" comes with the sysstat package. Once it's installed you can monitor your server like never before...

Here's the description of the sysstat package from the author
The sysstat utilities are a collection of performance monitoring tools for Linux. 
These include sar, sadf, mpstat, iostat, pidstat and sa tools. Go to the Features page to display
a list of sysstat's features, or see the Documentation page to learn some more about them.

For example, you can watch the network usage in real time:

# sar -n DEV 1 0
Linux 2.6.22-15-generic (xXxXx) 07/09/2008

11:26:36 AM IFACE rxpck/s txpck/s rxkB/s txkB/s rxcmp/s txcmp/s rxmcst/s
11:26:37 AM lo 0.00 0.00 0.00 0.00 0.00 0.00 0.00
11:26:37 AM eth0 5.05 0.00 0.86 0.00 0.00 0.00 0.00

11:26:37 AM IFACE rxpck/s txpck/s rxkB/s txkB/s rxcmp/s txcmp/s rxmcst/s
11:26:38 AM lo 0.00 0.00 0.00 0.00 0.00 0.00 0.00
11:26:38 AM eth0 4.00 0.00 0.45 0.00 0.00 0.00 0.00
...

Today, I'll introduce the erlang-sar package that's able to retrieve information from the sar command.

The application is composed of a collector "sar_collect", a helper module "sar_values" and the main module "sar".
Here comes a quick sample session:

% Starting the collector
sar_collect:start().

% Retrieving the data
sar:stats(cpu).
[{cpu,idle,<<"98.62">>},
{cpu,steal,<<"0.00">>},
{cpu,iowait,<<"0.00">>},
{cpu,system,<<"0.18">>},
{cpu,nice,<<"0.00">>},
{cpu,user,<<"1.20">>}]

% Retrieving more data
sar:stats([cpu,mem]).
[{swap,swpcad,<<"33236">>},
{swap,usage,<<"64.72">>},
{swap,used,<<"389872">>},
{swap,free,<<"212492">>},
{mem,kbcached,<<"84496">>},
{mem,kbbuffers,<<"63408">>},
{mem,memused,<<"98.78">>},
{mem,kbmemused,<<"508984">>},
{mem,kbmemfree,<<"6308">>},
{cpu,idle,<<"97.83">>},
{cpu,steal,<<"0.00">>},
{cpu,iowait,<<"0.75">>},
{cpu,system,<<"0.20">>},
{cpu,nice,<<"0.00">>},
{cpu,user,<<"1.22">>}]


The module "sar_values" also exports an "extractor" function that can be used to build fun()s:

% build a Mem fun()
Mem = sar_values:extractor(mem).

% Calling Mem fun() on sar:stats()
Mem(sar:stats([cpu,mem])).
[{kbcached,<<"84496">>},
{kbbuffers,<<"63480">>},
{memused,<<"98.77">>},
{kbmemused,<<"508976">>},
{kbmemfree,<<"6316">>}]

% Calling it on sar:stats()
Mem(sar:stats()).
[{kbcached,<<"84496">>},
{kbbuffers,<<"63520">>},
{memused,<<"98.80">>},
{kbmemused,<<"509100">>},
{kbmemfree,<<"6192">>}]


With this package you have access to all the data sar can export for you.
Here's the "sar.erl" code:

%% sar: thin wrapper around the sysstat "sadf" command.  Runs sadf over
%% the data file written by the sar_collect process and parses its
%% output into {Class, Key, Value} tuples via sar_values.
-module(sar).

-export([systat/0, stats/0, stats/1, option/1]).
-export([extract/1]).
%% Default sadf report options: CPU, memory/swap, inodes, processes,
%% run queue and network interface statistics.
-define(OPTIONS, "-u -r -v -c -q -n DEV").
%% Binary data file periodically refreshed by sar_collect.
-define(DATA, "/tmp/last").

%% Run sadf with the default options and return the raw port result
%% without parsing it.
systat() ->
Cmd = "sadf " ++ ?DATA ++ " -- " ++ ?OPTIONS,
execute(".", Cmd).

%% Run sadf with the default options and parse the output into a list
%% of {Class, Key, Value} statistics tuples.
stats() ->
Cmd = "sadf " ++ ?DATA ++ " -- " ++ ?OPTIONS,
{ok, _, Bin} = execute(".", Cmd),
extract(Bin).

%% stats/1: report only the requested statistic classes.
%% List elements are atoms understood by option/1 (cpu, mem, ...);
%% unknown atoms are silently dropped.  A single atom is wrapped into
%% a one-element list by the second clause.
stats(List) when is_list(List) ->
%% Fold the atoms into command-line fragments; elements are prepended,
%% hence the lists:reverse/1 below.
Args = lists:foldl(fun(X, Acc) -> case option(X) of
error ->
Acc;
T ->
%% "$ " is the space character (32), separating the switches.
[ $ , T | Acc ]
end end, [], List),
Cmd = "sadf " ++ ?DATA ++ " -- " ++ lists:reverse(Args),
{ok, _, Bin} = execute(".", lists:flatten(Cmd)),
extract(Bin);

%% Single atom: treat it as a one-element list.
stats(Elem) ->
stats([Elem]).

%% option/1: map a statistic class atom onto the matching sar/sadf
%% command-line switch.  Returns the atom 'error' for unknown classes,
%% which lets callers such as stats/1 skip them.
option(Flag) ->
    Table = [{cpu, "-u"},
             {disk, "-d"},
             {sock, "-n SOCK"},
             {eth0, "-n DEV"},
             {eth1, "-n DEV"},
             {eth2, "-n DEV"},
             {proc, "-c"},
             {run, "-q"},
             {mem, "-r"},
             {inode, "-v"},
             {switch, "-w"},
             {swaping, "-W"}],
    case lists:keyfind(Flag, 1, Table) of
        {Flag, Switch} -> Switch;
        false -> error
    end.

%% execute/2: run Cmd through an Erlang port and collect its output.
%% _Host is accepted for symmetry with sar_collect:execute/2 but is
%% currently ignored (the command runs in the node's working dir).
execute(_Host, Cmd) ->
    Port = open_port({spawn, Cmd}, [exit_status, binary]),
    wait(Port, []).

%% wait/2: receive loop collecting the port's output chunks.
%% Chunks are accumulated head-first for O(1) appends and reversed
%% before returning so callers see them in arrival order.  (The
%% previous version returned the accumulator as-is, i.e. reversed,
%% which garbles any output delivered in more than one chunk once it
%% is flattened by extract/1.)
wait(Port, Content) ->
    receive
        {Port, {data, BinData}} ->
            wait(Port, [BinData | Content]);

        {Port, {exit_status, Status}} ->
            {ok, Status, lists:reverse(Content)};

        {Port, eof} ->
            port_close(Port),
            {ok, eof, lists:reverse(Content)};

        {Port, exit} ->
            error_logger:info_msg("Received : ~p~n", [Port]),
            lists:reverse(Content)
    end.

%% extract/1: flatten the collected output chunks and hand them to the
%% sar_values parser, yielding [{Class, Key, Value}].
extract(Bin) ->
sar_values:extract(iolist_to_binary(Bin)).


You can see the "option/1" function that lets you convert atoms into command line arguments easily. I also use this function to test whether sar is able to handle a specific parameter. For example, with the help of my webservice I can query remote stats easily:
http://monitoring.lan/stats/q/cpu/servername


Here's the "sar_collect" module

%% sar_collect: periodically runs "sar -o" so that /tmp/last always
%% holds a fresh binary data file for the sar module to query.
-module(sar_collect).

%% systat/0 and systat/1 must be exported because they are used as
%% spawn/3 entry points below.  (sartime/1 and extract/1 were
%% previously exported but never defined, which is a compile error,
%% so they are dropped.)
-export([systat/0, systat/1, start/0, start/1]).

%% Start the collector with the default interval (see systat/0).
%% The previous version was missing this function head, leaving a
%% bare expression at the top level, which does not compile.
start() ->
    spawn(?MODULE, systat, []).

%% Start the collector sampling every Seconds seconds.
start(Seconds) ->
    spawn(?MODULE, systat, [Seconds]).

% update the file every second
%% systat/1: spawn entry point; an interval of 0 is coerced to 1 second.
systat(0) ->
loop(1);

systat(Seconds) ->
loop(Seconds).

%update the file every 59 seconds
%% systat/0: spawn entry point used by start/0.
systat() ->
loop(59).

%% loop/1: main collector loop.  Records one sar sample lasting
%% Seconds seconds into a temp file, then renames it over /tmp/last
%% so readers never see a partially written file.
loop(Seconds) when Seconds < 60 ->
Cmd = lists:flatten([ "sar -o /tmp/last.tmp ", integer_to_list(Seconds), " 1" ]),
execute(".", Cmd),
file:rename("/tmp/last.tmp", "/tmp/last"),
%% NOTE(review): timer:sleep/1 takes milliseconds, so this pauses for
%% (60 - Seconds) ms, not seconds -- probably meant (60 - Seconds) * 1000;
%% confirm the intended cycle length.
timer:sleep(60 - Seconds),
receive
stop ->
exit(normal);

%% Allow the sampling interval to be changed at runtime.
{interval, NewSeconds} ->
loop(NewSeconds);

_A ->
loop(Seconds)

%% after 0: don't block waiting for control messages.
after 0 ->
loop(Seconds)

end;

%default update 20 seconds (arbitrary chosen)
%% Intervals of 60 seconds or more fall back to a 20-second sample.
loop(_Seconds) ->
loop(20).

%% execute/2: run Cmd in directory Host via a port and wait for it to
%% finish.  Unlike sar:execute/2 this one honours the directory.
execute(Host, Cmd) ->
Port = open_port({spawn, Cmd}, [ {cd, Host}, exit_status, binary ] ),
wait(Port, []).

%% wait/2: drain the port until it exits.  Data chunks are discarded
%% (sar writes its results to the -o file; stdout is not needed).
%% NOTE(review): the branches return different shapes (ok | Content);
%% callers currently ignore the return value.
wait(Port, Content) ->
receive
{Port, {data, _BinData}} ->
wait(Port, Content);

{Port, {exit_status, _Status}} ->
ok;

{Port, eof} ->
port_close(Port),
Content;

{Port, exit} ->
error_logger:info_msg("Received : ~p~n", [Port]),
Content
end.


Finally there is the "sar_values" source code:

%% sar_values: parser for sadf/sar textual output.  Scans the raw
%% binary for known column headers and pairs each with the value that
%% follows it, producing {Class, Key, Value} tuples.
-module(sar_values).

-export([extract/1, extractor/1, sort/1]).
-export([parse/1, parse_value/2]).

%% extract/1: parse a whole sadf output binary into a list of
%% {Class, Key, Value} tuples (Value is kept as a binary string).
extract(Bin) ->
extract(Bin, []).

%% extract/2: accumulator version.  Results are prepended, so the
%% returned list is in reverse order of appearance in the input.
extract(Bin, Stats) ->
case parse(Bin) of
{Class, Type, Rest} ->
%io:format("~p.~p", [Class, Type]),
case parse_value(Rest, <<>>) of
{more, Value, More} ->
NewStats = [ {Class, Type, Value} | Stats ],
extract(More, NewStats);

%% End of input: return everything collected so far.
{eof, Value} ->
NewStats = [ {Class, Type, Value} | Stats ],
NewStats
end;

eof ->
Stats
end.

%% parse/1: scan the binary for the next recognised sar/sadf column
%% header and return {Class, Key, RestAfterHeader}; eof at end of
%% input.  Unrecognised bytes are skipped one at a time (last clause).
parse(<<"%user", Rest/binary >>) -> {cpu, user, Rest};
parse(<<"%nice", Rest/binary>>) -> {cpu, nice, Rest};
parse(<<"%system", Rest/binary>>) -> {cpu, system, Rest};
parse(<<"%iowait", Rest/binary>>) -> {cpu, iowait, Rest};
parse(<<"%steal", Rest/binary>>) -> {cpu, steal, Rest};
parse(<<"%idle", Rest/binary>>) -> {cpu, idle, Rest};

parse(<<"kbmemfree", Rest/binary>>) -> {mem, kbmemfree, Rest};
parse(<<"kbmemused", Rest/binary>>) -> {mem, kbmemused, Rest};
parse(<<"%memused", Rest/binary>>) -> {mem, memused, Rest};
parse(<<"kbbuffers", Rest/binary>>) -> {mem, kbbuffers, Rest};
parse(<<"kbcached", Rest/binary>>) -> {mem, kbcached, Rest};

parse(<<"kbswpfree", Rest/binary>>) -> {swap, free, Rest};
parse(<<"kbswpused", Rest/binary>>) -> {swap, used, Rest};
parse(<<"%swpused", Rest/binary>>) -> {swap, usage, Rest};
parse(<<"kbswpcad", Rest/binary>>) -> {swap, swpcad, Rest};

parse(<<"dentunusd", Rest/binary>>) -> {inode, dentryunused, Rest};
parse(<<"file-sz", Rest/binary>>) -> {inode, fileopened, Rest};
parse(<<"inode-sz", Rest/binary>>) -> {inode, inodes, Rest};
parse(<<"super-sz", Rest/binary>>) -> {inode, super, Rest};
parse(<<"%super-sz", Rest/binary>>) -> {inode, superusage, Rest};
parse(<<"dquot-sz", Rest/binary>>) -> {inode, dquotsz, Rest};
parse(<<"%dquot-sz", Rest/binary>>) -> {inode, dquotszusage, Rest};
parse(<<"rtsig-sz", Rest/binary>>) -> {rtsig, count , Rest};
parse(<<"%rtsig-sz", Rest/binary>>) -> {rtsig, usage, Rest};

parse(<<"totsck", Rest/binary>>) -> {sock, total, Rest};
parse(<<"tcpsck", Rest/binary>>) -> {sock, tcp, Rest};
parse(<<"udpsck", Rest/binary>>) -> {sock, udp, Rest};
parse(<<"rawsck", Rest/binary>>) -> {sock, raw, Rest};
parse(<<"ip-frag", Rest/binary>>) -> {sock, ipfrag, Rest};

parse(<<"runq-sz", Rest/binary>>) -> {procs, running, Rest};
parse(<<"plist-sz", Rest/binary>>) -> {procs, total, Rest};

%% "ldavg-15" must be tried before "ldavg-1" (longest match first).
parse(<<"ldavg-15", Rest/binary>>) -> {load, min15, Rest};
parse(<<"ldavg-1", Rest/binary>>) -> {load, min1, Rest};
parse(<<"ldavg-5", Rest/binary>>) -> {load, min5, Rest};

parse(<<"pswpin/s", Rest/binary>>) -> {swaping, pswpin, Rest};
parse(<<"pswpout/s", Rest/binary>>) -> {swaping, pswpout, Rest};

%% NOTE(review): sar prints the loopback interface as "lo" (letter o)
%% -- see the sample session above -- but this clause matches "l0"
%% (digit zero), so loopback rows are probably never tagged; confirm.
parse(<<"l0", Rest/binary>>) -> parsebis(Rest, l0);
parse(<<"eth0", Rest/binary>>) -> parsebis(Rest, eth0);
parse(<<"eth1", Rest/binary>>) -> parsebis(Rest, eth1);
parse(<<"eth2", Rest/binary>>) -> parsebis(Rest, eth2);

parse(<<>>) -> eof;

%% Nothing matched at this offset: drop one byte and keep scanning.
parse(Bin) ->
{_, Next} = split_binary(Bin, 1),
parse(Next).

%% parsebis/2: after an interface name matched in parse/1, scan for
%% the next per-interface counter header and tag it with Category.
%% NOTE(review): newer sysstat prints rxkB/s and txkB/s (as in the
%% sample session above); those headers are not matched here -- verify
%% against the installed sysstat version.
parsebis(<<"rxpck/s", Rest/binary>>, Category) -> {Category, rxpck, Rest};
parsebis(<<"txpck/s", Rest/binary>>, Category) -> {Category, txpck, Rest};
parsebis(<<"rxbyt/s", Rest/binary>>, Category) -> {Category, rxbyt, Rest};
parsebis(<<"txbyt/s", Rest/binary>>, Category) -> {Category, txbyt, Rest};
parsebis(<<"rxcmp/s", Rest/binary>>, Category) -> {Category, rxcmp, Rest};
parsebis(<<"txcmp/s", Rest/binary>>, Category) -> {Category, txcmp, Rest};
parsebis(<<"rxmcst/s", Rest/binary>>, Category) -> {Category, rxmcst, Rest};
%% No counter at this offset: skip one byte and keep scanning.
parsebis(Bin, Category) ->
{_, Next} = split_binary(Bin, 1),
parsebis(Next, Category).

%% parse_value/2: accumulate the non-whitespace bytes that follow a
%% header into Value.  Tabs and spaces are skipped, a newline yields
%% {more, Value, Rest} (more input to scan), and end of input yields
%% {eof, Value}.
%% Fixes: the accumulator was named _Value yet read (underscore vars
%% should never be used), and each byte was extracted via
%% split_binary/2 + iolist_to_binary/1; direct binary matching and a
%% single-accumulator binary append do the same work without copies.
parse_value(<<$\t, Rest/binary>>, Value) ->
    parse_value(Rest, Value);
parse_value(<<$ , Rest/binary>>, Value) ->
    parse_value(Rest, Value);

parse_value(<<$\n, Rest/binary>>, Value) ->
    {more, Value, Rest};

parse_value(<<>>, Value) ->
    {eof, Value};

%% Any other byte belongs to the value.
parse_value(<<C, Rest/binary>>, Value) ->
    parse_value(Rest, <<Value/binary, C>>).

%% extractor/1: build a fun that keeps only the {Key, Value} pairs of
%% the statistics tuples whose class matches Motif.
extractor(Motif) ->
    fun(Stats) when is_list(Stats) ->
            lists:filtermap(fun({Class, Key, Value}) when Class == Motif ->
                                    {true, {Key, Value}};
                               (_Other) ->
                                    false
                            end, Stats)
    end.

%% sort/1: order {Key, Value} pairs by strictly ascending key; any
%% other comparison (including equal keys) is treated as "not less".
sort(Pairs) ->
    ByKey = fun({KeyA, _}, {KeyB, _}) when KeyA < KeyB -> true;
               (_, _) -> false
            end,
    lists:sort(ByKey, Pairs).


Now that Erlang is R12B, I'm not so sure if "binary parsing code" is really as efficient as it can...

Monday, May 19, 2008

Monitoring log files with 'tail'

When you need to look for specific events from logfiles, your first idea is to use 'tail'. Tail is obviously the number one command that any sysadmin knows about.

From the first versions of tail until today, some really nice features have been implemented; one of those is the "follow=name" feature...

Since your erlang node will stay alive for many days, you'll end up meeting some logrotation tool that will replace the file you're lurking... So "follow=name" is for you !

Extract from a manual page:

There are two ways to specify how you'd like to track files with
this option, but that difference is noticeable only when a
followed file is removed or renamed. If you'd like to continue to
track the end of a growing file even after it has been unlinked,
use `--follow=descriptor'. This is the default behavior, but it
is not useful if you're tracking a log file that may be rotated
(removed or renamed, then reopened). In that case, use
`--follow=name' to track the named file by reopening it
periodically to see if it has been removed and recreated by some
other program.


Implementing this feature in pure Erlang is of course possible, but why lose time when you can directly use the "tail" binary already installed on your system?



%% tail: follow a log file by driving the system "tail --follow=name"
%% binary through a port, invoking a callback fun for every line.
-module(tail).
-export([start/1, start/2, start/3, stop/1, snapshot/1, display/1, init/3]).

%% Follow File under /var/log, printing each line (see display/1).
start(File) ->
start(File, fun display/1, "/var/log").

%% Follow File under /var/log with a caller-supplied callback fun/1.
start(File, Callback) ->
Dir = "/var/log",
start(File, Callback, Dir).

%% Follow File under Dir; returns the linked watcher pid.
start(File, Callback, Dir) ->
spawn_link(?MODULE, init, [File, Callback, Dir]).

%% snapshot/1: ask the watcher for its port and callback (debugging).
%% NOTE(review): {Port, Callback} are unbound here, so the first
%% pattern matches ANY 2-tuple, and there is no 'after' timeout --
%% this can hang if Pid is not a tail watcher.
snapshot(Pid) ->
Pid ! {snap, self() },
receive
{Port, Callback} ->
{Port, erlang:fun_info(Callback)};
_Any ->
_Any
end.

%% stop/1: ask the watcher to close its port and stop.
stop(Pid) ->
Pid ! stop.

%% Watcher entry point: spawn tail(1) with --follow=name so rotated
%% files are re-opened by name, then enter the receive loop.
%% NOTE(review): File is spliced into a shell command line -- do not
%% pass untrusted file names.
init(File, Callback, Dir) ->
Cmd = "/usr/bin/tail --follow=name "++ File,
Port = open_port({spawn, Cmd}, [ {cd, Dir}, stderr_to_stdout, {line, 256}, exit_status, binary]),
tail_loop(Port, Callback).

%% tail_loop/2: dispatch port output to the callback until the port
%% exits or a 'stop' request arrives.
tail_loop(Port, Callback) ->
receive
{Port, {data, {eol, Bin}}} ->
Callback(Bin),
tail_loop(Port, Callback);

%% Line longer than the 256-byte line buffer: delivered in pieces.
{Port, {data, {noeol, Bin}}} ->
Callback(Bin),
tail_loop(Port, Callback);

{Port, {data, Bin}} ->
Callback(Bin),
tail_loop(Port, Callback);

%% tail exited: stop the loop and return its exit status.
{Port, {exit_status, Status}} ->
{ok, Status};
%tail_loop(Port, Callback);

{Port, eof} ->
port_close(Port),
{ok, eof};

%% Debugging hook used by snapshot/1.
{snap, Who} ->
Who ! { Port, Callback},
tail_loop(Port, Callback);

stop ->
port_close(Port),
{ok, stop};

%% Drain anything else so the mailbox never grows unbounded.
_Any ->
tail_loop(Port, Callback)
end.

%% Default callback: print the received line prefixed with "[INFO]".
display(Line) ->
    io:format("[INFO] ~s~n", [iolist_to_binary(Line)]).



Let's say you want to monitor "/var/log/messages", here's how you can do it:

Shell> Tail = tail:start("messages").

This will display every new line (running in background) in your shell session.

Now let's say you want to do some tricky things with every line, you can pass as a parameter a callback fun:

Shell> Pid = logger_new(). % an example
Shell> Callback = fun(X) -> Pid ! {line, X} end. % sending a tuple to Pid
Shell> Tail = tail:start("messages", Callback).


Finally, you'll be able to hack the code and transform this method to "tail" multiple files since "tail" is able to watch more than one file...

Quick tip :

init(ListOfFiles, Callback, Dir) ->
Args = [ [ X, $ ] || X <- ListOfFiles ]
Cmd = "/usr/bin/tail --follow=name "++ lists:flatten(Args),


Happy Tailing !

Monday, November 19, 2007

NAGIOS (beurk) nrpe support for erlang

NAGIOS, a pretty bad piece of software, uses a pretty bad protocol, but NAGIOS seems to be installed everywhere...
I needed a way to bypass its really poor scheduling process, and naturally Erlang came to my rescue... But everything is not so simple.

NRPE, this horrible protocol, uses fixed-length packets (from the code, the last 2 characters are never set to 0; sizeof seems to be really misunderstood by the nagios developer :p).

NRPE also drags in its own CRC32 code, and for efficiency and time saving I didn't want to reimplement it in Erlang, so I wrote a nrpe_crc32 port...

Here's the crc32 code:


#include <unistd.h>
#include <stdio.h>
#include <string.h>
#include <stdint.h>     /* int16_t -- was missing, the struct below needs it */
#include <sys/types.h>  /* u_int32_t */

/* MAX_PACKETBUFFER_LENGTH was used but never defined in this file;
 * NRPE v2 uses a 1024-byte buffer. */
#ifndef MAX_PACKETBUFFER_LENGTH
#define MAX_PACKETBUFFER_LENGTH 1024
#endif

/* Lookup table for the byte-at-a-time CRC-32 computation; filled
 * once by generate_crc32_table(). */
static unsigned long crc32_table[256];

/* Mirrors the NRPE v2 on-the-wire packet layout (see common.h in
 * the nrpe sources). */
typedef struct packet_struct
{
    int16_t packet_version;
    int16_t packet_type;
    u_int32_t crc32_value;
    int16_t result_code;
    char buffer[MAX_PACKETBUFFER_LENGTH];
} packet;

/* build the crc table - must be called before calculating the crc value */
void generate_crc32_table(void){
unsigned long crc, poly;
int i, j;

poly=0xEDB88320L;
for(i=0;i<256;i++){
crc=i;
for(j=8;j>0;j--){
if(crc & 1)
crc=(crc>>1)^poly;
else
crc>>=1;
}
crc32_table[i]=crc;
}

return;
}

/* calculates the CRC 32 value for a buffer */
unsigned long calculate_crc32(char *buffer, unsigned int buffer_size){
register unsigned long crc;
int this_char;
int current_index;

crc=0xFFFFFFFF;

for(current_index=0;current_index this_char=(int)buffer[current_index];
crc=((crc>>8) & 0x00FFFFFF) ^ crc32_table[(crc ^ this_char) & 0xFF];
}

return (crc ^ 0xFFFFFFFF);
}

unsigned long test(const char *value)
{
return calculate_crc32((char *) value, strlen(value));
}


The port_driver:

/* port_driver.c */
/* Erlang linked-in driver exposing calculate_crc32() to Erlang code;
 * opened from crc32.erl under the name "crc32_drv". */

#include "erl_driver.h"

/* Implemented in the companion crc32.c file. */
extern void generate_crc32_table(void);
extern unsigned long calculate_crc32(char *, unsigned int);

/* Per-port driver state: just the port handle. */
typedef struct {
ErlDrvPort port;
} crc32_data;

/* Called when the port is opened: allocate the per-port state and
 * build the CRC lookup table. */
static ErlDrvData crc32_drv_start(ErlDrvPort port, char *buff)
{
crc32_data* d = (crc32_data*)driver_alloc(sizeof(crc32_data));
d->port = port;

/* init crc32 table */
generate_crc32_table();
return (ErlDrvData) d;
}

/* Called when the port is closed: release the per-port state. */
static void crc32_drv_stop(ErlDrvData handle)
{
driver_free((char*)handle);
}

/* Called for every message sent to the port.  buff[0] selects the
 * operation (1 = CRC-32) and the remaining bytes are the payload. */
static void crc32_drv_output(ErlDrvData handle, char *buff, int bufflen)
{
crc32_data* d = (crc32_data*)handle;

char fn = buff[0];
char *arg = &buff[1];
unsigned long res;

switch (fn) {
case 1:
res = calculate_crc32(arg, bufflen - 1);
/* NOTE(review): this sends sizeof(unsigned long) bytes in host
 * byte order, while the Erlang side (crc32:decode/1) matches
 * exactly 32 big-endian bits -- on 64-bit and/or little-endian
 * hosts decode will not see the expected integer; verify. */
driver_output(d->port, (char *) &res, (sizeof(unsigned long)));
break;
default:
/* Unknown opcode: silently ignored. */
break;
}
}

/* Driver callback table; only start/stop/output are implemented. */
ErlDrvEntry crc32_driver_entry = {
NULL, /* F_PTR init, N/A */
crc32_drv_start, /* L_PTR start, called when port is opened */
crc32_drv_stop, /* F_PTR stop, called when port is closed */
crc32_drv_output, /* F_PTR output, called when erlang has sent */
NULL, /* F_PTR ready_input, called when input descriptor ready */
NULL, /* F_PTR ready_output, called when output descriptor ready */
"crc32_drv", /* char *driver_name, the argument to open_port */
NULL, /* F_PTR finish, called when unloaded */
NULL, /* F_PTR control, port_command callback */
NULL, /* F_PTR timeout, reserved */
NULL /* F_PTR outputv, reserved */
};

/* Driver entry point; the name must match both the table above and
 * the name passed to erl_ddll:load_driver/2. */
DRIVER_INIT(crc32_drv) /* must match name in driver_entry */
{
return &crc32_driver_entry;
}


The crc32 module, initializing the lib, and calling the crc32 fun:


%% crc32: Erlang front-end for the crc32_drv linked-in driver.  A
%% single registered process owns the port and serialises requests.
-module(crc32).

-export([start/0,init/1,compute/1]).

%% Load the default driver and spawn the owner process.
start() ->
start("crc32_drv").

%% Load SharedLib from the current directory; an already-loaded
%% driver is fine.  Exits if the driver cannot be loaded.
start(SharedLib) ->
case erl_ddll:load_driver(".", SharedLib) of
ok -> ok;
{error, already_loaded} -> ok;
_E -> io:format("Error: ~p~n", [_E]),
exit({error, could_not_load_driver})
end,
spawn(?MODULE, init, [SharedLib]).

%% Owner process entry point: register under the module name, open
%% the driver port and serve requests forever.
init(SharedLib) ->
register(?MODULE, self()),
Port = open_port({spawn, SharedLib}, [binary]),
loop(Port).


%% compute/1: CRC-32 of an iolist/binary, computed by the C driver.
%% Opcode 1 selects the CRC operation (see crc32_drv_output in the
%% driver source).
compute(X) ->
Bin = iolist_to_binary(X),
call_port(<<1, Bin/binary>>).

%% Send a request to the registered owner process and await the reply.
%% NOTE(review): no 'after' clause -- this blocks forever if the
%% owner process is not running.
call_port(Msg) ->
?MODULE ! {call, self(), Msg},
receive
{?MODULE, Result} ->
Result
end.

%% Owner process loop: forward requests to the port, decode replies,
%% and shut down on 'stop' or when the port exits.
loop(Port) ->
receive
{call, Caller, Msg} ->
Port ! {self(), {command, Msg}},
receive
{Port, {data, Data}} ->
Caller ! {?MODULE, decode(Data)}
end,
loop(Port);

stop ->
Port ! {self(), close},
receive
{Port, closed} ->
exit(normal)
end;

{'EXIT', Port, Reason} ->
io:format("~p ~n", [Reason]),
exit(port_terminated)
end.

%% decode/1: interpret the driver's reply.  A 4-byte payload is read
%% as a 32-bit big-endian unsigned integer (also valid for
%% network-order data); anything else is returned untouched.
%% Fix: the two clauses were written as two separate definitions of
%% decode/1 ("." instead of ";"), which is a compile error; they are
%% joined here.  The is_binary/1 guard was redundant -- the binary
%% pattern already guarantees it.
decode(<<U:32/big-unsigned>>) ->
    U;
decode(X) ->
    X.



Now the nrpe module, there you'll see why the nrpe is pure crap, fixed packet length for this type of tool is nonsense...


%% nrpe: minimal client for the Nagios NRPE v2 fixed-length packet
%% protocol.
-module(nrpe).

-export([encode/1, request/1, crc32/1, connect/1, connect/2]).


%% encode/1: prefix Bin with its 32-bit CRC.
%% Fix: the original built <<Crc:32, Bin>>, which treats the binary
%% Bin as a single integer segment and fails at runtime with badarg;
%% it must be a binary segment (Bin/binary).
%% NOTE(review): this matches crc32:compute/1 against {Crc, _}, but
%% that function appears to return a plain integer (see crc32:decode/1)
%% -- confirm the intended contract with the crc32 module.
encode(Bin) ->
    {Crc, _} = crc32:compute(Bin),
    <<Crc:32, Bin/binary>>.

%% request/1: build a fixed-length NRPE v2 query packet.
%% Layout: version:16, type:16, crc32:32, result_code:16, then the
%% query, two NUL bytes and zero padding up to the fixed buffer size.
%% As the NRPE protocol requires, the CRC is first computed over the
%% packet with the CRC field set to 0, then the packet is rebuilt
%% with the real CRC (hence the two binary constructions).
request(Query) ->
Version = 2,
Type = 1,
Crc = 0,
Code = 0,
Blank = <<0:32/unit:256>>, % 1024 bytes
Q = iolist_to_binary(Query),
%% Pad so the buffer region has a fixed size regardless of Query
%% length (crashes if Query exceeds 1024 bytes).
Padlen = 1024 - size(Q),
{C, _} = crc32:compute(
<<Version:16, Type:16, Crc:32, Code:16, Q/binary, 0, 0, Blank:Padlen/binary>>),
<<Version:16, Type:16, C:32, Code:16, Q/binary, 0, 0, Blank:Padlen/binary>>.


Building two binaries to only send one is completely dumb. But this is required... Thanks
to nrpe...



%% crc32/1: convenience wrapper returning {Crc, Bin}.
%% NOTE(review): like encode/1 this assumes crc32:compute/1 returns a
%% tuple -- verify against the crc32 module.
crc32(Bin) ->
{Crc, _} = crc32:compute(Bin),
{Crc, Bin}.

%
% send_packet.packet_version=(int16_t)htons(NRPE_PACKET_VERSION_2);
% send_packet.packet_type=(int16_t)htons(QUERY_PACKET);
% strncpy(&send_packet.buffer[0],query,MAX_PACKETBUFFER_LENGTH);
% send_packet.buffer[MAX_PACKETBUFFER_LENGTH-1]='\x0';
%
% send_packet.crc32_value=(u_int32_t)0L;
% calculated_crc32=calculate_crc32((char *)&send_packet,sizeof(send_packet));
% send_packet.crc32_value=(u_int32_t)htonl(calculated_crc32);

%% #define QUERY_PACKET 1 /* id code for a packet containing a query */
%% #define RESPONSE_PACKET 2 /* id code for a packet containing a response */
%%
%% #define NRPE_PACKET_VERSION_2 2 /* packet version identifier */
%% #define NRPE_PACKET_VERSION_1 1 /* older packet version identifiers (no longer supported) */
%%
%% #define MAX_PACKETBUFFER_LENGTH 1024 /* max amount of data we'll send in one query/response */

%% typedef struct packet_struct{
%% int16_t packet_version;
%% int16_t packet_type;
%% u_int32_t crc32_value;
%% int16_t result_code;
%% char buffer[MAX_PACKETBUFFER_LENGTH];
%% }packet;

%% connect/1: connect to Host on the default NRPE port (5666).
connect(Host) ->
connect(Host, 5666).

%% connect/2: send a hard-coded "test" query, print and decode the
%% response, then close the socket.  Mostly a demo/debug entry point;
%% errors are printed, not returned.
connect(Host, Port) ->
case gen_tcp:connect(Host, Port, [binary, {active, false}]) of
{ok, Sock} ->
Query = request("test"),
send(Sock, Query),
io:format("Response: '~s'~n", [recv(Sock)]),
close(Sock);

{error, Error} ->
io:format("Connect-error: ~p~n", [Error])
end.

%% send/2: send Data on the socket, printing (but not propagating)
%% any error.
send(Sock, Data) ->
case gen_tcp:send(Sock, Data) of
ok ->
ok;

{error, Error} ->
io:format("send-error: ~p~n", [Error])
end.

%% recv/1: read one packet (2 second timeout) and decode it.
%% NOTE(review): on error this returns the io:format/2 result rather
%% than an error tuple -- callers cannot distinguish failure.
recv(Sock) ->
case gen_tcp:recv(Sock, 0, 2000) of
{ok, Packet} ->
io:format("read: ~p~n", [Packet]),
decode(Packet);

{error, Error} ->
io:format("recv-error: ~p~n", [Error])
end.


%% close/1: close the TCP socket.
close(Sock) ->
gen_tcp:close(Sock).


%% decode/1: strip the NRPE response header and decode the payload.
%% NOTE(review): the two literal 0 bytes match the 16-bit result_code
%% field only when it is zero; a non-zero result code makes this
%% crash with function_clause -- confirm whether that is intended.
decode(<<Version:16, Type:16, Crc:32, 0, 0, Rest/binary>>) ->
io:format("Version: ~p, Type: ~p, Crc: ~p~n", [Version, Type, Crc]),
decode_response(Rest).

%% decode_response/1: return the NUL-terminated message found at the
%% start of the fixed-size response buffer.
decode_response(Bin) ->
Len = msg_len(Bin, 0),
{Msg, _} = split_binary(Bin, Len),
Msg.


%% msg_len/2: length of the NUL-terminated string at the head of the
%% binary.  Improvements: bytes are matched directly instead of
%% splitting a one-byte sub-binary off per character, and a buffer
%% with no terminating NUL now counts as entirely message instead of
%% crashing inside split_binary/2.
msg_len(<<0, _Rest/binary>>, Len) ->
    Len;
msg_len(<<>>, Len) ->
    %% No terminating NUL: the whole buffer is the message.
    Len;
msg_len(<<_Byte, Rest/binary>>, Len) ->
    msg_len(Rest, Len + 1).


I hope someone will find this interesting :p

Sticky