Merge pull request #5315 from soloestoy/optimize-parsing-large-bulk
networking: optimize parsing large bulk greater than 32k
commit d60c17cbb3
@@ -1294,19 +1294,22 @@ int processMultibulkBuffer(client *c) {
             c->qb_pos = newline-c->querybuf+2;
             if (ll >= PROTO_MBULK_BIG_ARG) {
-                size_t qblen;
-
                 /* If we are going to read a large object from network
                  * try to make it likely that it will start at c->querybuf
                  * boundary so that we can optimize object creation
-                 * avoiding a large copy of data. */
-                sdsrange(c->querybuf,c->qb_pos,-1);
-                c->qb_pos = 0;
-                qblen = sdslen(c->querybuf);
-                /* Hint the sds library about the amount of bytes this string is
-                 * going to contain. */
-                if (qblen < (size_t)ll+2)
-                    c->querybuf = sdsMakeRoomFor(c->querybuf,ll+2-qblen);
+                 * avoiding a large copy of data.
+                 *
+                 * But only when the data we have not parsed is less than
+                 * or equal to ll+2. If the data length is greater than
+                 * ll+2, trimming querybuf is just a waste of time, because
+                 * at this time the querybuf contains not only our bulk. */
+                if (sdslen(c->querybuf)-c->qb_pos <= (size_t)ll+2) {
+                    sdsrange(c->querybuf,c->qb_pos,-1);
+                    c->qb_pos = 0;
+                    /* Hint the sds library about the amount of bytes this string is
+                     * going to contain. */
+                    c->querybuf = sdsMakeRoomFor(c->querybuf,ll+2);
+                }
             }
             c->bulklen = ll;
         }
 
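The gist of the change: before reading a bulk argument of at least PROTO_MBULK_BIG_ARG (32k) bytes, Redis trims the already-parsed prefix off c->querybuf so the payload lands at the start of the buffer and can later become an object without a large copy. The patch makes that trim conditional: it is only worthwhile when everything still unparsed belongs to this bulk (at most ll+2 bytes, payload plus trailing CRLF); if the buffer also holds later pipelined commands, the trim would just memmove unrelated data. Below is a minimal, self-contained sketch of that rule in plain C, not Redis code; buf, pos, trim_front and should_trim are hypothetical stand-ins for c->querybuf, c->qb_pos, sdsrange and the new condition.

/* Minimal sketch (not Redis code) of the trimming rule introduced by this
 * patch. buf, pos and len stand in for c->querybuf, c->qb_pos and
 * sdslen(c->querybuf); trim_front() mimics the effect of
 * sdsrange(c->querybuf,c->qb_pos,-1). */
#include <stdio.h>
#include <string.h>

#define PROTO_MBULK_BIG_ARG (1024*32)   /* same 32k threshold Redis uses */

/* Move the unparsed bytes buf[pos..len) to the front of the buffer and
 * return the new length. */
static size_t trim_front(char *buf, size_t len, size_t pos) {
    size_t remaining = len - pos;
    memmove(buf, buf + pos, remaining);
    return remaining;
}

/* Trim only for big bulks, and only when everything left unparsed belongs
 * to this bulk (payload plus trailing CRLF, i.e. at most ll+2 bytes).
 * Otherwise the buffer also holds later pipelined commands and the memmove
 * would shuffle them around for no benefit. */
static int should_trim(size_t buflen, size_t pos, long long ll) {
    return ll >= PROTO_MBULK_BIG_ARG && buflen - pos <= (size_t)ll + 2;
}

int main(void) {
    /* A SET command whose 40000-byte value has only started to arrive:
     * everything up to "$40000\r\n" is already parsed, "HELLO" is the first
     * slice of the payload. */
    char buf[64] = "*3\r\n$3\r\nSET\r\n$3\r\nkey\r\n$40000\r\nHELLO";
    size_t len = strlen(buf);
    size_t pos = len - 5;          /* parse position: just before "HELLO" */
    long long ll = 40000;          /* announced bulk length */

    if (should_trim(len, pos, ll)) {
        len = trim_front(buf, len, pos);
        pos = 0;                   /* mirrors c->qb_pos = 0 in the patch */
        printf("trimmed; unparsed payload now at offset 0: %.*s\n",
               (int)len, buf);
    } else {
        printf("skipped trim: buffer holds more than this bulk\n");
    }
    return 0;
}

In this example the five unparsed payload bytes fit within the announced 40000-byte bulk, so the trim happens. If the buffer instead contained the whole remaining payload plus a following pipelined command, the unparsed length would exceed ll+2 and should_trim() would return 0, which is exactly the case the patch now skips.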