diff --git a/tests/testlib/s2n_ktls_test_utils.c b/tests/testlib/s2n_ktls_test_utils.c
index 20c0897c4e3..08c1a060d41 100644
--- a/tests/testlib/s2n_ktls_test_utils.c
+++ b/tests/testlib/s2n_ktls_test_utils.c
@@ -43,11 +43,12 @@ static S2N_RESULT s2n_test_ktls_update_prev_header_len(struct s2n_test_ktls_io_s
 
 ssize_t s2n_test_ktls_sendmsg_io_stuffer(void *io_context, const struct msghdr *msg)
 {
+    errno = EINVAL;
     POSIX_ENSURE_REF(msg);
-    POSIX_ENSURE_REF(msg->msg_iov);
 
     struct s2n_test_ktls_io_stuffer *io_ctx = (struct s2n_test_ktls_io_stuffer *) io_context;
     POSIX_ENSURE_REF(io_ctx);
+    struct s2n_stuffer *data_buffer = &io_ctx->data_buffer;
     io_ctx->sendmsg_invoked_count++;
 
     uint8_t record_type = 0;
@@ -56,22 +57,20 @@ ssize_t s2n_test_ktls_sendmsg_io_stuffer(void *io_context, const struct msghdr *
 
     size_t total_len = 0;
     for (size_t count = 0; count < msg->msg_iovlen; count++) {
+        POSIX_ENSURE_REF(msg->msg_iov);
         uint8_t *buf = msg->msg_iov[count].iov_base;
-        POSIX_ENSURE_REF(buf);
         size_t len = msg->msg_iov[count].iov_len;
-        if (s2n_stuffer_write_bytes(&io_ctx->data_buffer, buf, len) < 0) {
-            /* This mock implementation only handles partial writes for msg_iovlen == 1.
-             *
-             * This simplifies the implementation and importantly doesn't limit our test
-             * coverage because partial writes are handled the same regardless of
-             * msg_iovlen. */
-            POSIX_ENSURE(msg->msg_iovlen == 1, S2N_ERR_SAFETY);
-
+        if (s2n_stuffer_write_bytes(data_buffer, buf, len) != S2N_SUCCESS) {
+            size_t partial_len = MIN(len, s2n_stuffer_space_remaining(data_buffer));
+            POSIX_GUARD(s2n_stuffer_write_bytes(data_buffer, buf, partial_len));
+            total_len += partial_len;
+            if (total_len) {
+                break;
+            }
             errno = EAGAIN;
             return -1;
         }
-
         total_len += len;
     }
 
     if (total_len) {
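The reworked mock follows the short-write convention of a real sendmsg(): accept as many bytes as the destination has room for, return the number actually accepted, and only fail with EAGAIN when nothing at all could be written. A minimal standalone sketch of that convention, using a hypothetical fixed-capacity sink rather than the stuffer-backed mock (names and sizes here are illustrative, not part of the test library):

#include <errno.h>
#include <string.h>
#include <sys/types.h>
#include <sys/uio.h>

/* Hypothetical fixed-capacity sink standing in for the stuffer-backed mock. */
struct test_sink {
    unsigned char buf[16];
    size_t len;
};

/* Accept as many bytes from the iovec array as fit. Returns the number of
 * bytes accepted, or -1 with errno set to EAGAIN if nothing could be accepted. */
static ssize_t test_sink_sendmsg(struct test_sink *sink, const struct iovec *iov, size_t iovlen)
{
    size_t total = 0;
    for (size_t i = 0; i < iovlen; i++) {
        size_t space = sizeof(sink->buf) - sink->len;
        size_t len = iov[i].iov_len;
        size_t accepted = (len < space) ? len : space;
        if (accepted > 0) {
            memcpy(sink->buf + sink->len, iov[i].iov_base, accepted);
            sink->len += accepted;
            total += accepted;
        }
        if (accepted < len) {
            /* Sink is full: report a partial write if anything was accepted,
             * otherwise signal that the caller should retry later. */
            if (total > 0) {
                break;
            }
            errno = EAGAIN;
            return -1;
        }
    }
    return (ssize_t) total;
}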
@@ -90,6 +89,7 @@ ssize_t s2n_test_ktls_sendmsg_io_stuffer(void *io_context, const struct msghdr *
  * are of the same type.
  */
 ssize_t s2n_test_ktls_recvmsg_io_stuffer(void *io_context, struct msghdr *msg)
 {
+    errno = EINVAL;
     POSIX_ENSURE_REF(msg);
     POSIX_ENSURE_REF(msg->msg_iov);
@@ -233,3 +233,17 @@ S2N_RESULT s2n_test_validate_ancillary(struct s2n_test_ktls_io_stuffer *ktls_io,
 
     return S2N_RESULT_OK;
 }
+
+S2N_RESULT s2n_test_records_in_ancillary(struct s2n_test_ktls_io_stuffer *ktls_io,
+        uint16_t expected_records)
+{
+    RESULT_ENSURE_REF(ktls_io);
+
+    size_t size = s2n_stuffer_data_available(&ktls_io->ancillary_buffer);
+    size_t records = size / S2N_TEST_KTLS_MOCK_HEADER_SIZE;
+    size_t extra = size % S2N_TEST_KTLS_MOCK_HEADER_SIZE;
+
+    RESULT_ENSURE_EQ(records, expected_records);
+    RESULT_ENSURE_EQ(extra, 0);
+    return S2N_RESULT_OK;
+}
diff --git a/tests/testlib/s2n_ktls_test_utils.h b/tests/testlib/s2n_ktls_test_utils.h
index aed6b546bd8..255d7efc873 100644
--- a/tests/testlib/s2n_ktls_test_utils.h
+++ b/tests/testlib/s2n_ktls_test_utils.h
@@ -72,3 +72,5 @@ S2N_RESULT s2n_test_validate_data(struct s2n_test_ktls_io_stuffer *ktls_io, uint
         uint16_t expected_len);
 S2N_RESULT s2n_test_validate_ancillary(struct s2n_test_ktls_io_stuffer *ktls_io,
         uint8_t expected_record_type, uint16_t expected_len);
+S2N_RESULT s2n_test_records_in_ancillary(struct s2n_test_ktls_io_stuffer *ktls_io,
+        uint16_t expected_records);
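s2n_test_records_in_ancillary asserts that the mock's ancillary buffer contains exactly expected_records complete record headers and no partial header. A hedged usage sketch (an illustrative test fragment, not part of this diff; it assumes, as the mock currently behaves, that each successful sendmsg call appends exactly one header):

/* Illustrative test fragment: count mock records as data is sent. */
DEFER_CLEANUP(struct s2n_connection *conn = s2n_connection_new(S2N_SERVER),
        s2n_connection_ptr_free);
EXPECT_NOT_NULL(conn);

DEFER_CLEANUP(struct s2n_test_ktls_io_stuffer out = { 0 }, s2n_ktls_io_stuffer_free);
EXPECT_OK(s2n_test_init_ktls_io_stuffer_send(conn, &out));

/* Nothing sent yet: no record headers expected. */
EXPECT_OK(s2n_test_records_in_ancillary(&out, 0));

/* Each successful send is expected to append one record header. */
uint8_t data[] = "hello";
const struct iovec iov = { .iov_base = data, .iov_len = sizeof(data) };
s2n_blocked_status blocked = S2N_NOT_BLOCKED;
EXPECT_EQUAL(s2n_ktls_sendv_with_offset(conn, &iov, 1, 0, &blocked), sizeof(data));
EXPECT_OK(s2n_test_records_in_ancillary(&out, 1));

EXPECT_EQUAL(s2n_ktls_sendv_with_offset(conn, &iov, 1, 0, &blocked), sizeof(data));
EXPECT_OK(s2n_test_records_in_ancillary(&out, 2));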
diff --git a/tests/unit/s2n_ktls_io_test.c b/tests/unit/s2n_ktls_io_test.c
index 7468aac6c06..57a29896474 100644
--- a/tests/unit/s2n_ktls_io_test.c
+++ b/tests/unit/s2n_ktls_io_test.c
@@ -72,6 +72,82 @@ ssize_t s2n_test_ktls_recvmsg_io_stuffer_and_ctrunc(void *io_context, struct msg
     return ret;
 }
 
+struct s2n_test_iovecs {
+    struct iovec *iovecs;
+    size_t iovecs_count;
+};
+
+static S2N_CLEANUP_RESULT s2n_test_iovecs_free(struct s2n_test_iovecs *in)
+{
+    RESULT_ENSURE_REF(in);
+    for (size_t i = 0; i < in->iovecs_count; i++) {
+        RESULT_GUARD_POSIX(s2n_free_object((uint8_t **) &in->iovecs[i].iov_base,
+                in->iovecs[i].iov_len));
+    }
+    RESULT_GUARD_POSIX(s2n_free_object((uint8_t **) &in->iovecs,
+            sizeof(struct iovec) * in->iovecs_count));
+    return S2N_RESULT_OK;
+}
+
+/* Testing only with contiguous data could hide errors.
+ * We should use iovecs where every buffer is allocated separately.
+ */
+static S2N_RESULT s2n_test_split_data(struct s2n_test_iovecs *iovecs, struct s2n_blob *data)
+{
+    RESULT_ENSURE_REF(iovecs);
+    RESULT_ENSURE_REF(data);
+
+    struct s2n_stuffer in = { 0 };
+    RESULT_GUARD_POSIX(s2n_stuffer_init_written(&in, data));
+
+    for (size_t i = 0; i < iovecs->iovecs_count; i++) {
+        if (iovecs->iovecs[i].iov_len == 0) {
+            continue;
+        }
+        struct s2n_blob mem = { 0 };
+        RESULT_GUARD_POSIX(s2n_alloc(&mem, iovecs->iovecs[i].iov_len));
+        RESULT_GUARD_POSIX(s2n_stuffer_read(&in, &mem));
+        iovecs->iovecs[i].iov_base = mem.data;
+    }
+    RESULT_ENSURE_EQ(s2n_stuffer_data_available(&in), 0);
+    return S2N_RESULT_OK;
+}
+
+static S2N_RESULT s2n_test_new_iovecs(struct s2n_test_iovecs *iovecs,
+        struct s2n_blob *data, const size_t *lens, size_t lens_count)
+{
+    RESULT_ENSURE_REF(iovecs);
+    RESULT_ENSURE_REF(data);
+    RESULT_ENSURE_REF(lens);
+
+    size_t len_total = 0;
+    for (size_t i = 0; i < lens_count; i++) {
+        len_total += lens[i];
+    }
+    RESULT_ENSURE_LTE(len_total, data->size);
+
+    size_t iovecs_count = lens_count;
+    if (len_total < data->size) {
+        iovecs_count++;
+    }
+
+    struct s2n_blob iovecs_mem = { 0 };
+    RESULT_GUARD_POSIX(s2n_alloc(&iovecs_mem, sizeof(struct iovec) * iovecs_count));
+    RESULT_GUARD_POSIX(s2n_blob_zero(&iovecs_mem));
+    iovecs->iovecs = (struct iovec *) iovecs_mem.data;
+    iovecs->iovecs_count = iovecs_count;
+
+    for (size_t i = 0; i < lens_count; i++) {
+        iovecs->iovecs[i].iov_len = lens[i];
+    }
+    if (lens_count < iovecs_count) {
+        iovecs->iovecs[lens_count].iov_len = data->size - len_total;
+    }
+
+    RESULT_GUARD(s2n_test_split_data(iovecs, data));
+    return S2N_RESULT_OK;
+}
+
 int main(int argc, char **argv)
 {
     BEGIN_TEST();
@@ -523,5 +599,264 @@ int main(int argc, char **argv)
         };
     };
 
+    /* Test s2n_ktls_send */
+    {
+        const size_t test_iov_lens[] = { 10, 0, 1, 5, 100, 100, 10 };
+
+        /* Safety */
+        {
+            struct s2n_connection conn = { 0 };
+            s2n_blocked_status blocked = 0;
+            const struct iovec test_iovec = { .iov_base = &blocked, .iov_len = 1 };
+
+            EXPECT_FAILURE_WITH_ERRNO(
+                    s2n_ktls_sendv_with_offset(NULL, &test_iovec, 1, 0, &blocked),
+                    S2N_ERR_NULL);
+            EXPECT_FAILURE_WITH_ERRNO(
+                    s2n_ktls_sendv_with_offset(&conn, NULL, 1, 0, &blocked),
+                    S2N_ERR_NULL);
+            EXPECT_FAILURE_WITH_ERRNO(
+                    s2n_ktls_sendv_with_offset(&conn, &test_iovec, 1, 0, NULL),
+                    S2N_ERR_NULL);
+            EXPECT_FAILURE_WITH_ERRNO(
+                    s2n_ktls_sendv_with_offset(&conn, &test_iovec, -1, 0, &blocked),
+                    S2N_ERR_INVALID_ARGUMENT);
+            EXPECT_FAILURE_WITH_ERRNO(
+                    s2n_ktls_sendv_with_offset(&conn, &test_iovec, 1, -1, &blocked),
+                    S2N_ERR_INVALID_ARGUMENT);
+        };
+
+        /* Test: Basic send with single iovec */
+        {
+            DEFER_CLEANUP(struct s2n_connection *conn = s2n_connection_new(S2N_SERVER),
+                    s2n_connection_ptr_free);
+            EXPECT_NOT_NULL(conn);
+
+            DEFER_CLEANUP(struct s2n_test_ktls_io_stuffer out = { 0 },
+                    s2n_ktls_io_stuffer_free);
+            EXPECT_OK(s2n_test_init_ktls_io_stuffer_send(conn, &out));
+
+            const struct iovec test_iovec = {
+                .iov_base = test_data,
+                .iov_len = sizeof(test_data),
+            };
+
+            s2n_blocked_status blocked = S2N_NOT_BLOCKED;
+            EXPECT_EQUAL(
+                    s2n_ktls_sendv_with_offset(conn, &test_iovec, 1, 0, &blocked),
+                    sizeof(test_data));
+
+            EXPECT_EQUAL(out.sendmsg_invoked_count, 1);
+            EXPECT_EQUAL(blocked, S2N_NOT_BLOCKED);
+            EXPECT_OK(s2n_test_validate_ancillary(&out, TLS_APPLICATION_DATA, sizeof(test_data)));
+            EXPECT_OK(s2n_test_validate_data(&out, test_data, sizeof(test_data)));
+        };
+
+        /* Test: Handle IO error from sendmsg */
+        {
+            DEFER_CLEANUP(struct s2n_connection *conn = s2n_connection_new(S2N_SERVER),
+                    s2n_connection_ptr_free);
+            EXPECT_NOT_NULL(conn);
+
+            struct s2n_test_ktls_io_fail_ctx io_ctx = { .errno_code = EINVAL };
+            EXPECT_OK(s2n_ktls_set_sendmsg_cb(conn, s2n_test_ktls_sendmsg_fail, &io_ctx));
+
+            const struct iovec test_iovec = {
+                .iov_base = test_data,
+                .iov_len = sizeof(test_data),
+            };
+
+            s2n_blocked_status blocked = S2N_NOT_BLOCKED;
+            EXPECT_FAILURE_WITH_ERRNO(
+                    s2n_ktls_sendv_with_offset(conn, &test_iovec, 1, 0, &blocked),
+                    S2N_ERR_IO);
+            EXPECT_EQUAL(io_ctx.invoked_count, 1);
+            EXPECT_EQUAL(blocked, S2N_BLOCKED_ON_WRITE);
+        };
+
+        /* Test: Send nothing */
+        {
+            DEFER_CLEANUP(struct s2n_connection *conn = s2n_connection_new(S2N_SERVER),
+                    s2n_connection_ptr_free);
+            EXPECT_NOT_NULL(conn);
+
+            DEFER_CLEANUP(struct s2n_test_ktls_io_stuffer out = { 0 },
+                    s2n_ktls_io_stuffer_free);
+            EXPECT_OK(s2n_test_init_ktls_io_stuffer_send(conn, &out));
+
+            s2n_blocked_status blocked = S2N_NOT_BLOCKED;
+            const struct iovec test_iovec = {
+                .iov_base = test_data,
+                .iov_len = 0,
+            };
+
+            /* Send nothing with zero-length iovec array */
+            EXPECT_EQUAL(s2n_ktls_sendv_with_offset(conn, NULL, 0, 0, &blocked), 0);
+            EXPECT_EQUAL(out.sendmsg_invoked_count, 1);
+            EXPECT_EQUAL(blocked, S2N_NOT_BLOCKED);
+            EXPECT_OK(s2n_test_records_in_ancillary(&out, 0));
+
+            /* Send nothing with iovec array with zero-length buffer */
+            EXPECT_EQUAL(s2n_ktls_sendv_with_offset(conn, &test_iovec, 1, 0, &blocked), 0);
+            EXPECT_EQUAL(out.sendmsg_invoked_count, 2);
+            EXPECT_EQUAL(blocked, S2N_NOT_BLOCKED);
+            EXPECT_OK(s2n_test_records_in_ancillary(&out, 0));
+        };
+
+        /* Test: Send with multiple iovecs */
+        {
+            DEFER_CLEANUP(struct s2n_connection *conn = s2n_connection_new(S2N_SERVER),
+                    s2n_connection_ptr_free);
+            EXPECT_NOT_NULL(conn);
+
+            DEFER_CLEANUP(struct s2n_test_ktls_io_stuffer out = { 0 },
+                    s2n_ktls_io_stuffer_free);
+            EXPECT_OK(s2n_test_init_ktls_io_stuffer_send(conn, &out));
+
+            DEFER_CLEANUP(struct s2n_test_iovecs test_iovecs = { 0 }, s2n_test_iovecs_free);
+            EXPECT_OK(s2n_test_new_iovecs(&test_iovecs, &test_data_blob,
+                    test_iov_lens, s2n_array_len(test_iov_lens)));
+
+            s2n_blocked_status blocked = S2N_NOT_BLOCKED;
+            ssize_t result = s2n_ktls_sendv_with_offset(conn,
+                    test_iovecs.iovecs, test_iovecs.iovecs_count, 0, &blocked);
+            EXPECT_EQUAL(result, sizeof(test_data));
+
+            EXPECT_EQUAL(out.sendmsg_invoked_count, 1);
+            EXPECT_EQUAL(blocked, S2N_NOT_BLOCKED);
+            EXPECT_OK(s2n_test_validate_ancillary(&out, TLS_APPLICATION_DATA, sizeof(test_data)));
+            EXPECT_OK(s2n_test_validate_data(&out, test_data, sizeof(test_data)));
+        };
+
+        /* Test: Send with very large number of iovecs */
+        {
+            DEFER_CLEANUP(struct s2n_connection *conn = s2n_connection_new(S2N_SERVER),
+                    s2n_connection_ptr_free);
+            EXPECT_NOT_NULL(conn);
+
+            DEFER_CLEANUP(struct s2n_test_ktls_io_stuffer out = { 0 },
+                    s2n_ktls_io_stuffer_free);
+            EXPECT_OK(s2n_test_init_ktls_io_stuffer_send(conn, &out));
+
+            size_t many_iov_lens[1000] = { 0 };
+            DEFER_CLEANUP(struct s2n_test_iovecs test_iovecs = { 0 }, s2n_test_iovecs_free);
+            EXPECT_OK(s2n_test_new_iovecs(&test_iovecs, &test_data_blob,
+                    many_iov_lens, s2n_array_len(many_iov_lens)));
+
+            s2n_blocked_status blocked = S2N_NOT_BLOCKED;
+            ssize_t result = s2n_ktls_sendv_with_offset(conn,
+                    test_iovecs.iovecs, test_iovecs.iovecs_count, 0, &blocked);
+            EXPECT_EQUAL(result, sizeof(test_data));
+
+            EXPECT_EQUAL(out.sendmsg_invoked_count, 1);
+            EXPECT_EQUAL(blocked, S2N_NOT_BLOCKED);
+            EXPECT_OK(s2n_test_validate_ancillary(&out, TLS_APPLICATION_DATA, sizeof(test_data)));
+            EXPECT_OK(s2n_test_validate_data(&out, test_data, sizeof(test_data)));
+        };
+
+        /* Test: Send with offset */
+        {
+            DEFER_CLEANUP(struct s2n_test_iovecs test_iovecs = { 0 }, s2n_test_iovecs_free);
+            EXPECT_OK(s2n_test_new_iovecs(&test_iovecs, &test_data_blob,
+                    test_iov_lens, s2n_array_len(test_iov_lens)));
+
+            /* Test: Send with invalid / too large offset */
+            {
+                const size_t bad_offset = sizeof(test_data) + 1;
+
+                DEFER_CLEANUP(struct s2n_connection *conn = s2n_connection_new(S2N_SERVER),
+                        s2n_connection_ptr_free);
+                EXPECT_NOT_NULL(conn);
+
+                DEFER_CLEANUP(struct s2n_test_ktls_io_stuffer out = { 0 },
+                        s2n_ktls_io_stuffer_free);
+                EXPECT_OK(s2n_test_init_ktls_io_stuffer_send(conn, &out));
+
+                s2n_blocked_status blocked = S2N_NOT_BLOCKED;
+                ssize_t result = s2n_ktls_sendv_with_offset(conn,
+                        test_iovecs.iovecs, test_iovecs.iovecs_count, bad_offset, &blocked);
+                EXPECT_FAILURE_WITH_ERRNO(result, S2N_ERR_INVALID_ARGUMENT);
+
+                EXPECT_EQUAL(out.sendmsg_invoked_count, 0);
+                EXPECT_EQUAL(blocked, S2N_NOT_BLOCKED);
+                EXPECT_OK(s2n_test_records_in_ancillary(&out, 0));
+            }
+
+            /* Test: Send with all possible valid offsets */
+            for (size_t offset = 0; offset < sizeof(test_data); offset++) {
+                DEFER_CLEANUP(struct s2n_connection *conn = s2n_connection_new(S2N_SERVER),
+                        s2n_connection_ptr_free);
+                EXPECT_NOT_NULL(conn);
+
+                DEFER_CLEANUP(struct s2n_test_ktls_io_stuffer out = { 0 },
+                        s2n_ktls_io_stuffer_free);
+                EXPECT_OK(s2n_test_init_ktls_io_stuffer_send(conn, &out));
+
+                const size_t expected_sent = sizeof(test_data) - offset;
+                EXPECT_TRUE(expected_sent > 0);
+
+                s2n_blocked_status blocked = S2N_NOT_BLOCKED;
+                ssize_t result = s2n_ktls_sendv_with_offset(conn,
+                        test_iovecs.iovecs, test_iovecs.iovecs_count, offset, &blocked);
+                EXPECT_EQUAL(result, expected_sent);
+
+                EXPECT_EQUAL(out.sendmsg_invoked_count, 1);
+                EXPECT_EQUAL(blocked, S2N_NOT_BLOCKED);
+                EXPECT_OK(s2n_test_validate_ancillary(&out, TLS_APPLICATION_DATA, expected_sent));
+                EXPECT_OK(s2n_test_validate_data(&out, test_data + offset, expected_sent));
+            }
+        };
+
+        /* Test: Partial write */
+        {
+            DEFER_CLEANUP(struct s2n_test_iovecs test_iovecs = { 0 }, s2n_test_iovecs_free);
+            EXPECT_OK(s2n_test_new_iovecs(&test_iovecs, &test_data_blob,
+                    test_iov_lens, s2n_array_len(test_iov_lens)));
+
+            /* Test with all possible partial write lengths */
+            for (size_t size = 1; size < sizeof(test_data); size++) {
+                DEFER_CLEANUP(struct s2n_connection *conn = s2n_connection_new(S2N_SERVER),
+                        s2n_connection_ptr_free);
+                EXPECT_NOT_NULL(conn);
+
+                DEFER_CLEANUP(struct s2n_test_ktls_io_stuffer out = { 0 },
+                        s2n_ktls_io_stuffer_free);
+                EXPECT_OK(s2n_test_init_ktls_io_stuffer_send(conn, &out));
+                EXPECT_SUCCESS(s2n_stuffer_free(&out.data_buffer));
+                EXPECT_SUCCESS(s2n_stuffer_alloc(&out.data_buffer, size));
+
+                s2n_blocked_status blocked = S2N_NOT_BLOCKED;
+                ssize_t result = s2n_ktls_sendv_with_offset(conn,
+                        test_iovecs.iovecs, test_iovecs.iovecs_count, 0, &blocked);
+                EXPECT_EQUAL(result, size);
+                EXPECT_EQUAL(blocked, S2N_NOT_BLOCKED);
+
+                EXPECT_EQUAL(out.sendmsg_invoked_count, 1);
+                EXPECT_OK(s2n_test_validate_data(&out, test_data, size));
+            }
+        };
+
+        /* Test: IO would block */
+        {
+            DEFER_CLEANUP(struct s2n_test_iovecs test_iovecs = { 0 }, s2n_test_iovecs_free);
+            EXPECT_OK(s2n_test_new_iovecs(&test_iovecs, &test_data_blob,
+                    test_iov_lens, s2n_array_len(test_iov_lens)));
+
+            DEFER_CLEANUP(struct s2n_connection *conn = s2n_connection_new(S2N_SERVER),
+                    s2n_connection_ptr_free);
+            EXPECT_NOT_NULL(conn);
+
+            struct s2n_test_ktls_io_fail_ctx io_ctx = { .errno_code = EAGAIN };
+            EXPECT_OK(s2n_ktls_set_sendmsg_cb(conn, s2n_test_ktls_sendmsg_fail, &io_ctx));
+
+            s2n_blocked_status blocked = S2N_NOT_BLOCKED;
+            ssize_t result = s2n_ktls_sendv_with_offset(conn,
+                    test_iovecs.iovecs, test_iovecs.iovecs_count, 0, &blocked);
+            EXPECT_FAILURE_WITH_ERRNO(result, S2N_ERR_IO_BLOCKED);
+            EXPECT_EQUAL(io_ctx.invoked_count, 1);
+            EXPECT_EQUAL(blocked, S2N_BLOCKED_ON_WRITE);
+        };
+    };
+
     END_TEST();
 }
diff --git a/tests/unit/s2n_ktls_test_utils_test.c b/tests/unit/s2n_ktls_test_utils_test.c
index c68da12ce1f..965ba6437bf 100644
--- a/tests/unit/s2n_ktls_test_utils_test.c
+++ b/tests/unit/s2n_ktls_test_utils_test.c
@@ -274,7 +274,7 @@ int main(int argc, char **argv)
             EXPECT_EQUAL(io_pair.client_in.sendmsg_invoked_count, blocked_invoked_count + 1);
         };
 
-        /* Attempt partial write with iov_len > 1 and expect error */
+        /* Partial write with iov_len > 1 */
         {
             DEFER_CLEANUP(struct s2n_connection *server = s2n_connection_new(S2N_SERVER),
                     s2n_connection_ptr_free);
@@ -294,16 +294,17 @@ int main(int argc, char **argv)
                 send_msg_iov[i].iov_len = S2N_TEST_TO_SEND;
                 test_data_ptr += S2N_TEST_TO_SEND;
             }
+
             struct msghdr send_msg = { .msg_iov = send_msg_iov, .msg_iovlen = S2N_TEST_MSG_IOVLEN };
             char control_buf[S2N_CONTROL_BUF_SIZE] = { 0 };
             EXPECT_OK(s2n_ktls_set_control_data(&send_msg, control_buf, sizeof(control_buf),
                     S2N_TLS_SET_RECORD_TYPE, test_record_type));
 
-            EXPECT_FAILURE_WITH_ERRNO(s2n_test_ktls_sendmsg_io_stuffer(server->send_io_context, &send_msg),
-                    S2N_ERR_SAFETY);
-            /* validate no record were sent  */
-            EXPECT_EQUAL(s2n_stuffer_data_available(&io_pair.client_in.ancillary_buffer), 0);
+            EXPECT_EQUAL(s2n_test_ktls_sendmsg_io_stuffer(server->send_io_context, &send_msg),
+                    S2N_TEST_TO_SEND);
             EXPECT_EQUAL(io_pair.client_in.sendmsg_invoked_count, 1);
+            EXPECT_OK(s2n_test_validate_ancillary(&io_pair.client_in, test_record_type, S2N_TEST_TO_SEND));
+            EXPECT_OK(s2n_test_validate_data(&io_pair.client_in, test_data, S2N_TEST_TO_SEND));
         };
     };
 
diff --git a/tls/s2n_ktls.h b/tls/s2n_ktls.h
index 3fc588e2340..6bf4e90dfb3 100644
--- a/tls/s2n_ktls.h
+++ b/tls/s2n_ktls.h
@@ -44,6 +44,9 @@ S2N_RESULT s2n_ktls_sendmsg(struct s2n_connection *conn, uint8_t record_type, co
 S2N_RESULT s2n_ktls_recvmsg(struct s2n_connection *conn, uint8_t *record_type, uint8_t *buf,
         size_t buf_len, s2n_blocked_status *blocked, size_t *bytes_read);
 
+ssize_t s2n_ktls_sendv_with_offset(struct s2n_connection *conn, const struct iovec *bufs,
+        ssize_t count, ssize_t offs, s2n_blocked_status *blocked);
+
 /* These functions will be part of the public API. */
 int s2n_connection_ktls_enable_send(struct s2n_connection *conn);
 int s2n_connection_ktls_enable_recv(struct s2n_connection *conn);
diff --git a/tls/s2n_ktls_io.c b/tls/s2n_ktls_io.c
index b3d2d25cd71..3b0603030a6 100644
--- a/tls/s2n_ktls_io.c
+++ b/tls/s2n_ktls_io.c
@@ -187,9 +187,9 @@ S2N_RESULT s2n_ktls_sendmsg(struct s2n_connection *conn, uint8_t record_type, co
         size_t msg_iovlen, s2n_blocked_status *blocked, size_t *bytes_written)
 {
     RESULT_ENSURE_REF(bytes_written);
-    RESULT_ENSURE_REF(msg_iov);
     RESULT_ENSURE_REF(blocked);
     RESULT_ENSURE_REF(conn);
+    RESULT_ENSURE(msg_iov != NULL || msg_iovlen == 0, S2N_ERR_NULL);
 
     *blocked = S2N_BLOCKED_ON_WRITE;
     *bytes_written = 0;
@@ -271,3 +271,55 @@ S2N_RESULT s2n_ktls_recvmsg(struct s2n_connection *conn, uint8_t *record_type, u
     *bytes_read = result;
     return S2N_RESULT_OK;
 }
+
+static S2N_RESULT s2n_ktls_new_iovecs_with_offset(const struct iovec *bufs,
+        size_t count, size_t offs, struct s2n_blob *mem)
+{
+    RESULT_ENSURE(bufs != NULL || count == 0, S2N_ERR_NULL);
+    RESULT_ENSURE_REF(mem);
+
+    RESULT_GUARD_POSIX(s2n_realloc(mem, sizeof(struct iovec) * count));
+    struct iovec *new_bufs = (struct iovec *) (void *) mem->data;
+    RESULT_ENSURE_REF(new_bufs);
+
+    for (size_t i = 0; i < count; i++) {
+        size_t old_len = bufs[i].iov_len;
+        if (offs < old_len) {
+            new_bufs[i].iov_base = (uint8_t *) bufs[i].iov_base + offs;
+            new_bufs[i].iov_len = old_len - offs;
+            offs = 0;
+        } else {
+            /* Zero any iovec skipped by the offset.
+             * We could change the count of the copy instead, but this is simpler. */
+            new_bufs[i].iov_base = NULL;
+            new_bufs[i].iov_len = 0;
+            offs -= old_len;
+        }
+    }
+    /* The offset cannot be greater than the total size of all iovecs */
+    RESULT_ENSURE(offs == 0, S2N_ERR_INVALID_ARGUMENT);
+    return S2N_RESULT_OK;
+}
+
+ssize_t s2n_ktls_sendv_with_offset(struct s2n_connection *conn, const struct iovec *bufs,
+        ssize_t count_in, ssize_t offs_in, s2n_blocked_status *blocked)
+{
+    POSIX_ENSURE(count_in >= 0, S2N_ERR_INVALID_ARGUMENT);
+    size_t count = count_in;
+    POSIX_ENSURE(offs_in >= 0, S2N_ERR_INVALID_ARGUMENT);
+    size_t offs = offs_in;
+
+    DEFER_CLEANUP(struct s2n_blob new_bufs = { 0 }, s2n_free);
+    if (offs > 0) {
+        /* We can't modify the application-owned iovecs to reflect the offset.
+         * Therefore, we must alloc and modify a copy.
+         */
+        POSIX_GUARD_RESULT(s2n_ktls_new_iovecs_with_offset(bufs, count, offs, &new_bufs));
+        bufs = (const struct iovec *) (void *) new_bufs.data;
+    }
+
+    size_t bytes_written = 0;
+    POSIX_GUARD_RESULT(s2n_ktls_sendmsg(conn, TLS_APPLICATION_DATA, bufs, count,
+            blocked, &bytes_written));
+    return bytes_written;
+}
diff --git a/tls/s2n_send.c b/tls/s2n_send.c
index 065c4bf49fa..fdd40023fa3 100644
--- a/tls/s2n_send.c
+++ b/tls/s2n_send.c
@@ -24,6 +24,7 @@
 #include "tls/s2n_cipher_suites.h"
 #include "tls/s2n_connection.h"
 #include "tls/s2n_handshake.h"
+#include "tls/s2n_ktls.h"
 #include "tls/s2n_post_handshake.h"
 #include "tls/s2n_record.h"
 #include "utils/s2n_blob.h"
@@ -114,6 +115,10 @@ ssize_t s2n_sendv_with_offset_impl(struct s2n_connection *conn, const struct iov
     POSIX_ENSURE(s2n_connection_check_io_status(conn, S2N_IO_WRITABLE), S2N_ERR_CLOSED);
     POSIX_ENSURE(!s2n_connection_is_quic_enabled(conn), S2N_ERR_UNSUPPORTED_WITH_QUIC);
 
+    if (conn->ktls_send_enabled) {
+        return s2n_ktls_sendv_with_offset(conn, bufs, count, offs, blocked);
+    }
+
     /* Flush any pending I/O */
     POSIX_GUARD(s2n_flush(conn, blocked));
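With kTLS enabled for sending, s2n_sendv_with_offset_impl() now dispatches to s2n_ktls_sendv_with_offset() instead of the userspace record path. The offset parameter exists so callers can resume after a partial write without rebuilding their iovec array. A hedged, application-side sketch of that pattern using the existing public API (the send_all helper and its parameters are hypothetical, not part of s2n-tls or this diff):

/* Illustrative application-side sketch: send a full iovec array through the
 * public API, resuming after partial or blocked writes by advancing the
 * offset instead of rebuilding the iovecs. */
#include <sys/uio.h>

#include <s2n.h>

int send_all(struct s2n_connection *conn, const struct iovec *bufs, ssize_t count, ssize_t total)
{
    ssize_t offset = 0;
    while (offset < total) {
        s2n_blocked_status blocked = S2N_NOT_BLOCKED;
        ssize_t written = s2n_sendv_with_offset(conn, bufs, count, offset, &blocked);
        if (written >= 0) {
            /* Partial write: advance the offset and continue with the same iovecs. */
            offset += written;
            continue;
        }
        if (s2n_error_get_type(s2n_errno) == S2N_ERR_T_BLOCKED) {
            /* Wait for the socket to become writable (poll/epoll), then retry
             * with the same offset. */
            continue;
        }
        return -1; /* fatal error */
    }
    return 0;
}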