core: fix parsing chunked TXT records
teafish authored and saghul committed Jun 10, 2016
1 parent 0201657 commit 65417ef
Showing 4 changed files with 90 additions and 19 deletions.
20 changes: 14 additions & 6 deletions pycares/_cfficore/__init__.py
@@ -279,23 +279,31 @@ def _query_cb(arg, status, timeouts, abuf, alen):
             status = None
 
         elif query_type == _lib.T_TXT:
-            txt_reply = _ffi.new("struct ares_txt_reply **")
-            parse_status = _lib.ares_parse_txt_reply(abuf, alen, txt_reply);
+            txt_reply = _ffi.new("struct ares_txt_ext **")
+            parse_status = _lib.ares_parse_txt_reply_ext(abuf, alen, txt_reply);
             if parse_status != ARES_SUCCESS:
                 result = None
                 status = parse_status
             else:
                 result = []
-                txt_reply_ptr = _ffi.new("struct ares_txt_reply **")
+                txt_reply_ptr = _ffi.new("struct ares_txt_ext **")
                 txt_reply_ptr[0] = txt_reply[0]
+                tmp_obj = None
                 while True:
                     if txt_reply_ptr[0] == _ffi.NULL:
+                        if tmp_obj is not None:
+                            result.append(tmp_obj)
                         break
-                    result.append(ares_query_txt_result(txt_reply_ptr[0]))
+                    if txt_reply_ptr[0].record_start == 1:
+                        if tmp_obj is not None:
+                            result.append(tmp_obj)
+                        tmp_obj = ares_query_txt_result(txt_reply_ptr[0])
+                    else:
+                        new_chunk = ares_query_txt_result(txt_reply_ptr[0])
+                        tmp_obj.text += new_chunk.text
                     txt_reply_ptr[0] = txt_reply_ptr[0].next
                 _lib.ares_free_data(txt_reply[0])
                 status = None
 
         else:
             raise ValueError("invalid query type specified")

@@ -651,7 +659,7 @@ def __init__(self, srv):

 class ares_query_txt_result(object):
     def __init__(self, txt):
-        self.txt = _ffi_string(txt.txt)
+        self.text = _ffi_string(txt.txt)
         self.ttl = txt.ttl


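The rewritten loop folds consecutive chunks into a single result object: a chunk with record_start == 1 flushes the previous record and opens a new one, while any other chunk is appended onto the current record's text; the ares_query_txt_result attribute is renamed from txt to text so the loop can concatenate onto it. A minimal standalone sketch of that aggregation logic, using plain (text, record_start) tuples in place of the ares_txt_ext linked list (the helper name is illustrative, not pycares API):

    def merge_txt_chunks(chunks):
        """Merge (text, record_start) chunks into whole TXT records."""
        records = []
        current = None
        for text, record_start in chunks:
            if record_start:
                if current is not None:
                    records.append(current)  # previous record is complete
                current = text               # first chunk opens a new record
            else:
                current += text              # continuation of the same record
        if current is not None:
            records.append(current)          # flush the final record
        return records

    # Two records, the second split into three chunks:
    assert merge_txt_chunks([("MS=ms18955624", 1),
                             ("v=spf1 ", 1),
                             ("include:a.example ", 0),
                             ("~all", 0)]) == [
        "MS=ms18955624", "v=spf1 include:a.example ~all"]
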
15 changes: 13 additions & 2 deletions pycares/_cfficore/pycares_build.py
@@ -517,6 +517,17 @@
     int ttl;
 };
+/* NOTE: This structure is a superset of ares_txt_reply */
+struct ares_txt_ext {
+    struct ares_txt_ext *next;
+    unsigned char *txt;
+    size_t length;
+    /* 1 - if start of new record
+     * 0 - if a chunk in the same record */
+    unsigned char record_start;
+    int ttl;
+};
 struct ares_naptr_reply {
     struct ares_naptr_reply *next;
     unsigned char *flags;
@@ -578,9 +589,9 @@
                          int alen,
                          struct ares_mx_reply** mx_out);
-int ares_parse_txt_reply(const unsigned char* abuf,
+int ares_parse_txt_reply_ext(const unsigned char* abuf,
                          int alen,
-                         struct ares_txt_reply** txt_out);
+                         struct ares_txt_ext** txt_out);
 int ares_parse_naptr_reply(const unsigned char* abuf,
                            int alen,
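The cdef above must mirror c-ares's own struct ares_txt_ext declaration, since cffi uses it to interpret the pointer that ares_parse_txt_reply_ext() returns through its out-parameter; record_start is the flag the merging logic keys on. A self-contained toy version of the allocate-and-walk cffi pattern the binding uses (toy struct and data, not pycares code; the length field is omitted for brevity):

    from cffi import FFI

    ffi = FFI()
    ffi.cdef("""
        struct txt_ext {
            struct txt_ext *next;
            char *txt;
            unsigned char record_start;
        };
    """)

    # Build a two-chunk list by hand. Keep the cdata objects alive in local
    # variables: assigning a pointer into a struct field does not keep the
    # pointed-to memory alive in cffi.
    buf1 = ffi.new("char[]", b"v=spf1 ")
    buf2 = ffi.new("char[]", b"~all")
    node2 = ffi.new("struct txt_ext *")
    node2.txt = buf2
    node2.record_start = 0   # continuation chunk; next is NULL by default
    node1 = ffi.new("struct txt_ext *")
    node1.next = node2
    node1.txt = buf1
    node1.record_start = 1   # first chunk of the record

    # Walk the list the same way _query_cb walks the parsed reply.
    node = node1
    while node != ffi.NULL:
        print(ffi.string(node.txt), node.record_start)
        node = node.next
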
49 changes: 39 additions & 10 deletions src/cares.c
@@ -7,6 +7,7 @@
 #include <netdb.h>
 #endif
 #include "nameser.h"
+#include "bytesobject.h"
 
 #define PYCARES_ADDRTTL_SIZE 256

@@ -454,8 +455,9 @@ query_txt_cb(void *arg, int status,int timeouts, unsigned char *answer_buf, int answer_len)
 {
     PyGILState_STATE gstate = PyGILState_Ensure();
     int parse_status;
-    struct ares_txt_reply *txt_reply, *txt_ptr;
-    PyObject *dns_result, *errorno, *tmp, *result, *callback;
+    struct ares_txt_ext *txt_reply, *txt_ptr;
+    PyObject *dns_result, *errorno, *tmp_obj, *result, *callback;
+    PyObject *assembled_txt;
 
     txt_reply = NULL;
     callback = (PyObject *)arg;
@@ -468,7 +470,7 @@ query_txt_cb(void *arg, int status,int timeouts, unsigned char *answer_buf, int answer_len)
         goto callback;
     }
 
-    parse_status = ares_parse_txt_reply(answer_buf, answer_len, &txt_reply);
+    parse_status = ares_parse_txt_reply_ext(answer_buf, answer_len, &txt_reply);
     if (parse_status != ARES_SUCCESS) {
         errorno = PyInt_FromLong((long)parse_status);
         dns_result = Py_None;
@@ -486,16 +488,43 @@ query_txt_cb(void *arg, int status,int timeouts, unsigned char *answer_buf, int answer_len)
         goto callback;
     }
 
-    for (txt_ptr = txt_reply; txt_ptr != NULL; txt_ptr = txt_ptr->next) {
-        tmp = PyStructSequence_New(&AresQueryTXTResultType);
-        if (tmp == NULL) {
-            break;
-        }
-        PyStructSequence_SET_ITEM(tmp, 0, Py_BuildValue("s", (const char *)txt_ptr->txt));
-        PyStructSequence_SET_ITEM(tmp, 1, PyInt_FromLong((long)txt_ptr->ttl));
-        PyList_Append(dns_result, tmp);
-        Py_DECREF(tmp);
+    tmp_obj = NULL;
+    assembled_txt = NULL;
+    txt_ptr = txt_reply;
+    while (1) {
+        if (txt_ptr == NULL || txt_ptr->record_start == 1) {
+            if (tmp_obj != NULL) {
+                /* Add the assembled record to the result when seeing a new record (except for the first time) and after the last chunk has been seen */
+                PyStructSequence_SET_ITEM(tmp_obj, 0, Py_BuildValue("s", PyBytes_AS_STRING(assembled_txt)));
+                PyList_Append(dns_result, tmp_obj);
+                Py_DECREF(tmp_obj);
+                Py_DECREF(assembled_txt);
+            }
+            if (txt_ptr == NULL) {
+                /* Exit while loop when last chunk has been seen */
+                break;
+            }
+        }
+        if (txt_ptr->record_start == 1) {
+            /* In case of a new record, prepare its object */
+            tmp_obj = PyStructSequence_New(&AresQueryTXTResultType);
+            if (tmp_obj == NULL) {
+                break;
+            }
+            /* ttl of the first chunk is representative for the entire record */
+            PyStructSequence_SET_ITEM(tmp_obj, 1, PyInt_FromLong((long)txt_ptr->ttl));
+            assembled_txt = PyBytes_FromString("");
+        }
+        /* Concatenate each chunk's text onto the assembled record */
+        PyBytes_ConcatAndDel(&assembled_txt, PyBytes_FromString((char*)txt_ptr->txt));
+        if (assembled_txt == NULL) {
+            Py_DECREF(tmp_obj);
+            break;
+        }
+        /* Move on to the next chunk */
+        txt_ptr = txt_ptr->next;
     }
 
     errorno = Py_None;
     Py_INCREF(Py_None);

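For context on why a reply contains chunks at all: a TXT record's data is a sequence of <character-string>s, each at most 255 bytes (RFC 1035), so long values such as SPF policies are published split into pieces and must be reassembled by the consumer, which is what the rewritten loop above does with assembled_txt. A quick sketch of that round trip:

    def split_txt(value, limit=255):
        """Split a TXT value into RFC 1035 character-strings of at most 255 bytes."""
        return [value[i:i + limit] for i in range(0, len(value), limit)]

    long_value = b"v=spf1 " + b"ip4:192.0.2.1 " * 30 + b"~all"
    chunks = split_txt(long_value)
    assert len(chunks) == 2                     # 431 bytes -> two chunks
    assert all(len(c) <= 255 for c in chunks)
    assert b"".join(chunks) == long_value       # consumers concatenate the chunks
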
25 changes: 24 additions & 1 deletion tests/tests.py
@@ -11,7 +11,7 @@
 class DNSTest(unittest.TestCase):
 
     def setUp(self):
-        self.channel = pycares.Channel(timeout=1.0, tries=1)
+        self.channel = pycares.Channel(timeout=5.0, tries=1)
 
     def tearDown(self):
         self.channel = None
@@ -174,6 +174,29 @@ def cb(result, errorno):
             self.assertEqual(type(r), pycares.ares_query_txt_result)
             self.assertTrue(r.ttl >= 0)
 
+    def test_query_txt_chunked(self):
+        self.result, self.errorno = None, None
+        def cb(result, errorno):
+            self.result, self.errorno = result, errorno
+        self.channel.query('jobscoutdaily.com', pycares.QUERY_TYPE_TXT, cb)
+        self.wait()
+        self.assertEqual(self.errorno, None)
+        # If the chunks are aggregated, only one TXT record should be visible. Three would show if they are not properly merged.
+        # jobscoutdaily.com. 21600 IN TXT "v=spf1 " "include:emailcampaigns.net include:spf.dynect.net include:ccsend.com include:_spf.elasticemail.com ip4:67.200.116.86 ip4:67.200.116.90 ip4:67.200.116.97 ip4:67.200.116.111 ip4:74.199.198.2 " " ~all"
+        self.assertEqual(len(self.result), 1)
+        self.assertEqual(self.result[0].text, "v=spf1 include:emailcampaigns.net include:spf.dynect.net include:ccsend.com include:_spf.elasticemail.com ip4:67.200.116.86 ip4:67.200.116.90 ip4:67.200.116.97 ip4:67.200.116.111 ip4:74.199.198.2 ~all")
+
+    def test_query_txt_multiple_chunked(self):
+        self.result, self.errorno = None, None
+        def cb(result, errorno):
+            self.result, self.errorno = result, errorno
+        self.channel.query('s-pulse.co.jp', pycares.QUERY_TYPE_TXT, cb)
+        self.wait()
+        self.assertEqual(self.errorno, None)
+        # s-pulse.co.jp. 3600 IN TXT "MS=ms18955624"
+        # s-pulse.co.jp. 3600 IN TXT "v=spf1 " "include:spf-bma.mpme.jp ip4:202.248.11.9 ip4:202.248.11.10 " "ip4:218.223.68.132 ip4:218.223.68.77 ip4:210.254.139.121 " "ip4:211.128.73.121 ip4:210.254.139.122 ip4:211.128.73.122 " "ip4:210.254.139.123 ip4:211.128.73.123 ip4:210.254.139.124 " "ip4:211.128.73.124 ip4:210.254.139.13 ip4:211.128.73.13 " "ip4:52.68.199.198 include:spf.betrend.com " "include:spf.protection.outlook.com " "~all"
+        self.assertEqual(len(self.result), 2)
+
     def test_query_soa(self):
         self.result, self.errorno = None, None
         def cb(result, errorno):
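The tests drive the channel through a self.wait() helper. Outside the test harness, a TXT query against this change could look roughly like the sketch below; the select() loop is the usual c-ares polling pattern, and error handling is elided:

    import select
    import pycares

    def wait(channel):
        # Poll until the channel has no pending queries left.
        while True:
            read_fds, write_fds = channel.getsock()
            if not read_fds and not write_fds:
                break
            timeout = channel.timeout()
            rlist, wlist, _ = select.select(read_fds, write_fds, [], timeout)
            for fd in rlist:
                channel.process_fd(fd, pycares.ARES_SOCKET_BAD)
            for fd in wlist:
                channel.process_fd(pycares.ARES_SOCKET_BAD, fd)

    def on_result(result, errorno):
        if errorno is not None:
            print("error:", errorno)
            return
        for record in result:
            # With this fix, each chunked record arrives merged in .text
            print(record.ttl, record.text)

    channel = pycares.Channel(timeout=5.0, tries=1)
    channel.query('jobscoutdaily.com', pycares.QUERY_TYPE_TXT, on_result)
    wait(channel)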
