// rtc_plugins/util/RTCContext.cpp
#include "RTCContext.h"

// Standard headers used directly in this file (Python/NumPy and boost::python headers are
// assumed to be pulled in through RTCContext.h; the includes below are harmless if duplicated).
#include <cstring>
#include <iostream>
#include <limits>
#include <mutex>
#include <unistd.h>
void RTCContext::onRoom(uint32_t typeId, RTCENGINE_NAMESPACE::MRTCRoomInfo& roomInfo) {
    //std::cout << "RTCContext::onRoom():" << roomInfo.roomId << "," << roomInfo.displayName << "," << roomInfo.userId << "," << roomInfo.message;
    std::cout << "RTCContext::onRoom()" << std::endl;
    std::lock_guard<std::mutex> lock(mutex_);
    isOnRoom_ = true;
}

void RTCContext::onConsumer(uint32_t msgId, const char* roomId, const char* peerId, RTCENGINE_NAMESPACE::MRTCConsumerInfo& consumerInfo) {
    std::cout << "RTCContext::onConsumer():" << consumerInfo.roomId << "," << consumerInfo.displayName << "," << consumerInfo.channelIndex << std::endl;
    std::lock_guard<std::mutex> lock(mutex_);
    isOnConsumer_ = true;
    //std::cout << "RTCContext::onConsumer()" << std::endl;
}

void RTCContext::onRender(const char* roomId, const char* peerId,
                          RTCENGINE_NAMESPACE::MRTCVideoSourceType sourceType, const RTCENGINE_NAMESPACE::MRTCVideoFrame& videoFrame) {
    std::cout << "RTCContext::onRender()" << std::endl;
}

void RTCContext::onCallBackMessage(uint32_t msgId, const char* msg) {
    std::lock_guard<std::mutex> lock(mutex_);
    if (msgId == (uint32_t)mrtc::JOIN_MULTI_ROOM_SUCCESS) {
        std::cout << "receive join multi room callback, msgId:" << msgId << std::endl;
        isJoinMultiRoom_ = true;
    }
    std::cout << "RTCContext::onCallBackMessage(), msgId:" << msgId << ", msg:" << msg << std::endl;
    //std::cout << "RTCContext::onCallBackMessage()" << std::endl;
}

void RTCContext::onCallBackCustomData(RTCENGINE_NAMESPACE::MRTCCustomDataObject object) {
    //std::cout << "RTCContext::onCallBackCustomData(), obj:" << object.peerId << "," << object.data << "," << object.data_length;
    std::cout << "RTCContext::onCallBackCustomData()" << std::endl;
}

void RTCContext::onSoundLevelUpdate(const char* roomId, const char* peerId, uint16_t audioSourceType,
                                    uint8_t channelIndex, uint16_t volume, int32_t vad)
{
    std::cout << "RTCContext::onSoundLevelUpdate()" << std::endl;
}

/*
void RTCContext::onAudioProcess(const char* roomId, const char* peerId,
                                mrtc::MRTCAudioFrame& audioFrame,
                                mrtc::MRTCAudioSourceType audioSourceType)
{
    namespace py = boost::python;
    PyGILState_STATE gstate = PyGILState_Ensure();
    try {
        std::cout << "-----------------------------------" << std::endl;
        std::cout << "dataCount:" << audioFrame.dataCount << std::endl;
        std::cout << "dataCount value: " << audioFrame.dataCount
                  << " (max: " << std::numeric_limits<npy_intp>::max() << ")" << std::endl;
        std::cout << "onAudioProcess, numpyApi_:" << numpyApi_[93] << std::endl;
        if (!numpyApi_ || !numpyApi_[93]) { // index 93 is the PyArray_SimpleNew slot
            std::cout << "numpyApi_ is null in onAudioProcess" << std::endl;
        } else {
            std::cout << "numpyApi_ is not null in onAudioProcess:" << numpyApi_[93] << std::endl;
        }
        //auto numpyApi = RTCContext::numpy_api();
        std::cout << "step1" << std::endl;
        if (!numpyApi_) {
            PyGILState_Release(gstate);
            throw std::runtime_error("NumPy C-API not initialized. Call import_array() in module init");
        }
        std::cout << "step2" << std::endl;
        using PyArray_SimpleNew_t = PyObject*(*)(int, npy_intp*, int);
        void* func_ptr = numpyApi_[93];
        std::cout << "Raw function pointer: " << func_ptr << std::endl;
        auto ptmp = (PyObject*(*)(int, npy_intp*, int))numpyApi_[93];
        std::cout << "ptmp is:" << ptmp << std::endl;
        std::cout << "Pointer sizes:\n"
                  << "void*: " << sizeof(void*) << "\n"
                  << "FunctionPtr: " << sizeof(PyObject*(*)(int, npy_intp*, int)) << std::endl;
        // 2. Use memcpy for the cast to avoid compiler-optimization issues
        PyArray_SimpleNew_t PyArray_SimpleNew;
        static_assert(sizeof(func_ptr) == sizeof(PyArray_SimpleNew),
                      "Pointer size mismatch");
        std::cout << "step3" << std::endl;
        memcpy(&PyArray_SimpleNew, &func_ptr, sizeof(func_ptr));
        //auto PyArray_SimpleNew = reinterpret_cast<PyArray_SimpleNew_t>(numpyApi_[93]);
        std::cout << "step4, PyArray_SimpleNew:" << PyArray_SimpleNew << std::endl;
        // 3. Strictly validate the input data
        if (!audioFrame.data || audioFrame.dataCount <= 0) {
            PyGILState_Release(gstate);
            throw std::invalid_argument("Invalid audio frame data");
        }
        std::cout << "step5" << std::endl;
        // 4. Safely build the dimension array (with bounds check)
        if (audioFrame.dataCount > std::numeric_limits<npy_intp>::max()) {
            PyGILState_Release(gstate);
            throw std::overflow_error("Audio frame size exceeds maximum limit");
        }
        std::cout << "step6" << std::endl;
        npy_intp dims[1] = {static_cast<npy_intp>(audioFrame.dataCount)};
        std::cout << "step7" << std::endl;
        // 5. Create the NumPy array with memory protection
        PyObject* pyArray = nullptr;
        pyArray = PyArray_SimpleNew(1, dims, NPY_INT16);
        std::cout << "step8" << std::endl;
        if (!pyArray) {
            PyGILState_Release(gstate);
            throw std::bad_alloc();
        }
        std::cout << "step9" << std::endl;
        // 6. Copy the data safely (with alignment check)
        if (reinterpret_cast<uintptr_t>(audioFrame.data) % alignof(int16_t) != 0) {
            Py_DECREF(pyArray);
            PyGILState_Release(gstate);
            throw std::runtime_error("Unaligned audio data pointer");
        }
        std::cout << "step10" << std::endl;
        std::memcpy(PyArray_DATA(reinterpret_cast<PyArrayObject*>(pyArray)),
                    audioFrame.data,
                    audioFrame.dataCount * sizeof(int16_t));
        std::cout << "step11" << std::endl;
        // 7. Invoke the callback (with reference-count protection)
        if (!pyCallback_.is_none()) {
            try {
                pyCallback_(
                    py::handle<>(pyArray), // reference managed automatically
                    audioFrame.dataCount,
                    audioFrame.sampleRate,
                    audioFrame.numChannels,
                    audioFrame.channelIndex
                );
            } catch (...) {
                Py_DECREF(pyArray);
                throw; // rethrow the exception
            }
        }
        std::cout << "step12" << std::endl;
        // 8. Release resources
        Py_DECREF(pyArray);
        std::cout << "step13" << std::endl;
        PyGILState_Release(gstate);
        std::cout << "step14" << std::endl;
    } catch (const std::exception& e) {
        std::cerr << "Audio process error: " << e.what() << std::endl;
        PyErr_Print();
    }
    exit(0);
}
*/

void RTCContext::onAudioProcess(const char* roomId, const char* peerId,
                                mrtc::MRTCAudioFrame& audioFrame,
                                mrtc::MRTCAudioSourceType audioSourceType)
{
    namespace py = boost::python;
    std::cout << "=== audio processing started ===" << std::endl;
    PyGILState_STATE gstate = PyGILState_Ensure();
    std::cout << "[1] GIL acquired" << std::endl;
    try {
        // 1. Validate input parameters
        std::cout << "[2] validating input parameters..." << std::endl;
        std::cout << "    dataCount: " << audioFrame.dataCount
                  << " (max: " << std::numeric_limits<npy_intp>::max() << ")" << std::endl;
        if (!audioFrame.data || audioFrame.dataCount <= 0) {
            std::cout << "[ERROR] invalid audio data pointer or length" << std::endl;
            // The GIL is released once by the catch handlers below.
            throw std::invalid_argument("Invalid audio frame data");
        }
        if (audioFrame.dataCount > std::numeric_limits<npy_intp>::max()) {
            std::cout << "[ERROR] data length exceeds maximum" << std::endl;
            throw std::overflow_error("Audio frame size exceeds maximum limit");
        }
        // 2. Prepare the NumPy array dimensions
        std::cout << "[3] preparing array dimensions..." << std::endl;
        npy_intp dims[1] = {static_cast<npy_intp>(audioFrame.dataCount)};
        std::cout << "    dimensions set: " << dims[0] << std::endl;
        // 3. Import the NumPy module
        std::cout << "[4] importing numpy.core.multiarray..." << std::endl;
        PyObject* numpy_module = PyImport_ImportModule("numpy.core.multiarray");
        if (!numpy_module) {
            std::cout << "[ERROR] failed to import numpy module" << std::endl;
            throw std::runtime_error("Failed to import numpy.core");
        }
        std::cout << "    module imported: " << numpy_module << std::endl;
        // 4. Get the empty() function
        std::cout << "[5] getting numpy.empty..." << std::endl;
        PyObject* empty_func = PyObject_GetAttrString(numpy_module, "empty");
        if (!empty_func) {
            Py_DECREF(numpy_module);
            std::cout << "[ERROR] failed to get numpy.empty" << std::endl;
            throw std::runtime_error("Failed to get numpy.empty");
        }
        std::cout << "    function obtained: " << empty_func << std::endl;
        // 5. Build the argument tuple
        std::cout << "[6] building argument tuple..." << std::endl;
        PyObject* py_dims = PyTuple_New(1);
        std::cout << "step1, " << py_dims << std::endl;
        PyTuple_SetItem(py_dims, 0, PyLong_FromLong(dims[0]));
        std::cout << "step2" << std::endl;
        // Handle the dtype conversion to a Python object explicitly
        PyArray_Descr* np_dtype = PyArray_DescrFromType(NPY_INT16);
        std::cout << "step3" << std::endl;
        if (!np_dtype) {
            Py_DECREF(py_dims);
            Py_DECREF(empty_func);
            Py_DECREF(numpy_module);
            std::cout << "[ERROR] failed to create dtype" << std::endl;
            throw std::runtime_error("Failed to create numpy dtype");
        }
        std::cout << "step4" << std::endl;
        PyObject* dtype = reinterpret_cast<PyObject*>(np_dtype);
        std::cout << "step5" << std::endl;
        PyObject* args = PyTuple_Pack(2, py_dims, dtype);
        std::cout << "    arguments built: " << args << std::endl;
        // 6. Call numpy.empty to create the array
        std::cout << "[7] calling numpy.empty to create the array..." << std::endl;
        PyObject* pyArray = PyObject_Call(empty_func, args, nullptr);
        std::cout << "    array creation result: " << pyArray << std::endl;
        // 7. Clean up temporary Python objects
        std::cout << "[8] cleaning up temporary Python objects..." << std::endl;
        Py_DECREF(args);
        Py_DECREF(py_dims);
        Py_DECREF(dtype);
        Py_DECREF(empty_func);
        Py_DECREF(numpy_module);
        if (!pyArray) {
            std::cout << "[ERROR] array creation failed" << std::endl;
            throw std::bad_alloc();
        }
        // 8. Check memory alignment
        std::cout << "[9] checking memory alignment..." << std::endl;
        if (reinterpret_cast<uintptr_t>(audioFrame.data) % alignof(int16_t) != 0) {
            Py_DECREF(pyArray);
            std::cout << "[ERROR] audio data pointer is not aligned" << std::endl;
            throw std::runtime_error("Unaligned audio data pointer");
        }
        // 9. Copy the audio data
        std::cout << "[10] copying audio data into the NumPy array..." << std::endl;
        std::memcpy(PyArray_DATA((PyArrayObject*)pyArray),
                    audioFrame.data,
                    audioFrame.dataCount * sizeof(int16_t));
        std::cout << "    data copy complete" << std::endl;
        // 10. Invoke the Python callback
        if (!pyCallback_.is_none()) {
            std::cout << "[11] invoking the Python callback..." << std::endl;
            try {
                pyCallback_(
                    py::handle<>(py::borrowed(pyArray)), // borrowed: we keep our own reference and release it below
                    audioFrame.dataCount,
                    audioFrame.sampleRate,
                    audioFrame.numChannels,
                    audioFrame.channelIndex
                );
                std::cout << "    callback finished" << std::endl;
            } catch (...) {
                std::cout << "[ERROR] exception during callback" << std::endl;
                Py_DECREF(pyArray);
                throw;
            }
        } else {
            std::cout << "[11] no callback registered" << std::endl;
        }
        // 11. Clean up resources
        std::cout << "[12] releasing the Python array..." << std::endl;
        Py_DECREF(pyArray);
        std::cout << "[13] releasing the GIL..." << std::endl;
        PyGILState_Release(gstate);
        std::cout << "=== audio processing finished ===" << std::endl;
    } catch (const std::exception& e) {
        std::cout << "[EXCEPTION] caught: " << e.what() << std::endl;
        PyErr_Print();
        std::cerr << "Audio process error: " << e.what() << std::endl;
        PyGILState_Release(gstate);
    } catch (...) {
        std::cout << "[EXCEPTION] caught non-standard exception" << std::endl;
        PyErr_Print();
        PyGILState_Release(gstate);
    }
}

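// Note on the callback contract (descriptive comment, inferred from onAudioProcess above):
// pyCallback_ is invoked with five positional arguments, in this order:
//   (numpy int16 array, dataCount, sampleRate, numChannels, channelIndex).
// Any Python handler registered via setPyCallback() is expected to accept that arity.
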
void RTCContext::onProducer(uint32_t msgId, mrtc::MRTCProducerInfo& info)
{
    std::cout << "-----------------------------------" << std::endl;
    std::cout << "RTCContext::onProducer()" << std::endl;
}

bool RTCContext::init(const char* selfUserId, const char* selfDisplayName, const char* selfRoomId)
{
    if (!numpyApi_ || !numpyApi_[93]) { // index 93 is the PyArray_SimpleNew slot
        std::cout << "numpyApi_ is null in init" << std::endl;
    } else {
        std::cout << "numpyApi_ is not null in init, numpyApi_[93]:" << numpyApi_[93] << std::endl;
    }
    mrtc::IMRTCEngineFactory* rtcFactory = mrtc::getMRTCEngineFactory();
    if (!rtcFactory)
    {
        return false;
    }
    rtcEngine_ = rtcFactory->produceMRTCEngine();
    if (!rtcEngine_)
    {
        return false;
    }
    mrtc::MRTCEngineConfig engineConfig;
    strcpy(engineConfig.domain, domain);
    strcpy(engineConfig.applicationId, appid);
    strcpy(engineConfig.appSecrectKey, appSecrectKey);
    engineConfig.port = port;
    if (0 != rtcEngine_->init(engineConfig, this))
    {
        std::cout << "RTCContext::instance().init() failed" << std::endl;
        return false;
    }
    if (0 != rtcEngine_->setUserInfo(selfUserId, selfDisplayName, selfRoomId))
    {
        std::cout << "RTCContext::instance().setUserInfo() failed" << std::endl;
        return false;
    }
    mrtc::MRTCJoinAuthority authority;
    strcpy(authority.applicationId, appid);
    strcpy(authority.appSecretKey, appSecrectKey);
    mrtc::MRTCJoinConfig loginConfig;
    if (0 != rtcEngine_->joinRoom(authority, loginConfig))
    {
        std::cout << "RTCContext::instance().joinRoom() failed" << std::endl;
        return false;
    }
    if (0 != rtcEngine_->registerListener(mrtc::MRTCListenerType::TYPE_LISTENER_ROOM, this))
    {
        std::cout << "RTCContext::instance().registerListener(TYPE_LISTENER_ROOM) failed" << std::endl;
        return false;
    }
    if (0 != rtcEngine_->registerListener(mrtc::MRTCListenerType::TYPE_LISTENER_CONSUMER, this))
    {
        std::cout << "RTCContext::instance().registerListener(TYPE_LISTENER_CONSUMER) failed" << std::endl;
        return false;
    }
    return true;
}

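/*
 * init() reads its connection settings from file/global-scope symbols (domain, appid,
 * appSecrectKey, port) that are not defined in this translation unit; they are assumed to
 * come from RTCContext.h or a configuration header. A purely hypothetical example of the
 * shape those definitions would need for the strcpy/assignment calls above:
 *
 *   static const char domain[]        = "rtc.example.com";      // placeholder
 *   static const char appid[]         = "your-application-id";  // placeholder
 *   static const char appSecrectKey[] = "your-app-secret-key";  // placeholder
 *   static const int  port            = 443;                    // placeholder
 */
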
bool RTCContext::initRecv(const char* destRoomId, const char* srcUserId, const int16_t destChannelIndex)
{
    if (!numpyApi_ || !numpyApi_[93]) { // index 93 is the PyArray_SimpleNew slot
        std::cout << "numpyApi_ is null in initRecv" << std::endl;
    } else {
        std::cout << "numpyApi_ is not null in initRecv, numpyApi_[93]:" << numpyApi_[93] << std::endl;
    }
    while (!isOnConsumer_)
    {
        std::cout << "wait for OnConsumer" << std::endl;
        sleep(3);
    }
    std::cout << "registerSoundLevelListener" << std::endl;
    int16_t ret1 = rtcEngine_->registerSoundLevelListener(mrtc::TYPE_AUDIO_SOURCE_CUSTOM, destRoomId,
                                                          srcUserId, destChannelIndex, this);
    if (0 != ret1)
    {
        std::cout << "RTCContext::instance().registerSoundLevelListener() inUser failed, ret:" << ret1 << std::endl;
        return false;
    }
    std::cout << "muteAudio" << std::endl;
    int16_t ret2 = rtcEngine_->muteAudio(destRoomId, srcUserId, mrtc::TYPE_AUDIO_SOURCE_CUSTOM, false, destChannelIndex);
    if (0 != ret2)
    {
        std::cout << "RTCContext::instance().muteAudio() failed, ret:" << ret2 << std::endl;
        return false;
    }
    std::cout << "init recv succ" << std::endl;
    return true;
}

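/*
 * Minimal receive-side sketch (illustrative only: the user IDs, room ID and channel index
 * are hypothetical placeholders, and error handling is omitted):
 *
 *   RTCContext& ctx = RTCContext::instance();
 *   ctx.setPyCallback(handler);                   // boost::python object wrapping a Python callable
 *   ctx.init("listener01", "Listener", "roomA");  // join own room and register listeners
 *   ctx.initRecv("roomA", "speaker01", 0);        // wait for onConsumer, then subscribe and unmute
 *   // Decoded audio then arrives via onAudioProcess(), which forwards it to the Python callback.
 */
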
bool RTCContext::initSend(const char* destRoomId, const int16_t destChannelIndex)
{
    while (!isOnRoom_)
    {
        std::cout << "wait for OnRoom" << std::endl;
        sleep(3);
    }
    std::cout << "join multi room" << std::endl;
    int16_t ret1 = rtcEngine_->joinMultiRoom(destRoomId);
    if (ret1 != 0)
    {
        std::cout << "joinMultiRoom fail, ret:" << ret1 << std::endl;
        return false;
    }
    mrtc::MRTCAudioOption option;
    strcpy(option.dstRoomId, destRoomId);
    option.channelIndex = destChannelIndex;
    std::cout << "startCustomAudio" << std::endl;
    int16_t ret2 = rtcEngine_->startCustomAudio(option);
    if (ret2 != 0)
    {
        std::cout << "startCustomAudio fail, ret:" << ret2 << std::endl;
        return false;
    }
    std::cout << "init send succ" << std::endl;
    return true;
}

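/*
 * Minimal send-side sketch (illustrative only: IDs, channel index and PCM parameters are
 * hypothetical, and whether dataLen is expressed in bytes or in samples should be confirmed
 * against the MRTC SDK before use):
 *
 *   RTCContext& ctx = RTCContext::instance();
 *   ctx.init("sender01", "Sender", "roomA");      // join own room (isOnRoom_ is set by onRoom)
 *   ctx.initSend("roomB", 0);                     // join the destination room, start custom audio
 *   std::vector<int16_t> pcm(480);                // e.g. 10 ms of 48 kHz mono silence
 *   ctx.sendCustomAudioData(0, pcm.data(), 48000, 1, pcm.size() * sizeof(int16_t));
 *   ctx.destorySend(0);                           // stop the custom audio track
 */
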
void RTCContext::destorySend(const int16_t selfChannelIndex)
{
    rtcEngine_->stopCustomAudio(selfChannelIndex);
}

int16_t RTCContext::sendAudioData(uint8_t channelIndex, const void* pData, int32_t nSampleRate, uint64_t nNumberOfChannels, uint64_t dataLength)
{
    std::lock_guard<std::mutex> lock(mutex_);
    if (pData_)
    {
        return rtcEngine_->sendCustomAudioData(channelIndex, pData, nSampleRate, nNumberOfChannels, dataLength);
    }
    return 0;
}

int16_t RTCContext::sendCustomAudioData(const int16_t channelIndex, void* customData, int32_t sampleRate,
                                        uint64_t channelNum, uint64_t dataLen)
{
    while (!isOnRoom_ || !isJoinMultiRoom_) {
        std::cout << "wait for room and multi room before send" << std::endl;
        sleep(3);
    }
    std::lock_guard<std::mutex> lock(mutex_);
    if (customData == nullptr) {
        std::cout << "customData is null" << std::endl;
        return -1;
    }
    std::cout << "customData addr is:" << customData << std::endl;
    return rtcEngine_->sendCustomAudioData(channelIndex, customData, sampleRate, channelNum, dataLen);
}

mrtc::IMRTCEngine* RTCContext::getRtcEngine() const
{
    std::lock_guard<std::mutex> lock(mutex_);
    return rtcEngine_;
}

void* RTCContext::getpData() const
{
    std::lock_guard<std::mutex> lock(mutex_);
    return pData_;
}

void RTCContext::setpData(void* pData)
{
    std::lock_guard<std::mutex> lock(mutex_);
    pData_ = pData;
}

void RTCContext::setPyCallback(boost::python::object callback) {
    std::lock_guard<std::mutex> lock(mutex_);
    pyCallback_ = callback;
}

void RTCContext::setNumpyApi(void** numpyApi) {
    std::lock_guard<std::mutex> lock(mutex_);
    numpyApi_ = numpyApi;
    if (numpyApi_) {
        std::cout << "setNumpyApi, numpyApi_[93]:" << numpyApi_[93] << std::endl;
    } else {
        std::cout << "setNumpyApi, numpyApi_ is null" << std::endl;
    }
}

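/*
 * Sketch of how the NumPy C-API table could be handed to this class from the Boost.Python
 * module init. The module name and the exact bindings are hypothetical; the only assumption
 * taken from this file is that setNumpyApi() expects the raw PyArray_API table (whose slot 93
 * is used as PyArray_SimpleNew above):
 *
 *   #define PY_ARRAY_UNIQUE_SYMBOL RTC_PLUGIN_ARRAY_API
 *   #include <numpy/arrayobject.h>
 *
 *   BOOST_PYTHON_MODULE(rtc_plugin)
 *   {
 *       if (_import_array() < 0) {                // loads the PyArray_API table (a void**)
 *           throw std::runtime_error("import_array failed");
 *       }
 *       RTCContext::instance().setNumpyApi(reinterpret_cast<void**>(PyArray_API));
 *       // ... expose init/initSend/initRecv/sendCustomAudioData/setPyCallback here ...
 *   }
 */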