// rtc_plugins/util/RTCContext.cpp

#include "RTCContext.h"
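// RTCContext bridges the MRTC engine and an embedded Python interpreter: it
// implements the engine's room/consumer/audio listener callbacks and forwards
// decoded audio frames to a Python callback registered via setPyCallback()
// (boost::python + boost::python::numpy). Members such as rtcEngine_, mutex_,
// pyCallback_, numpyApi_ and the isOnRoom_/isRecv_/isMultiRoom_/isJoinMultiRoom_
// flags are expected to be declared in RTCContext.h.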
void RTCContext::onRoom(uint32_t typeId, RTCENGINE_NAMESPACE::MRTCRoomInfo& roomInfo) {
    //std::cout << "RTCContext::onRoom():" << roomInfo.roomId << "," << roomInfo.displayName << "," << roomInfo.userId << "," << roomInfo.message;
    std::cout << "RTCContext::onRoom()" << std::endl;
    std::lock_guard<std::mutex> lock(mutex_);
    isOnRoom_ = true;
}
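// When the context was initialized for receiving (isRecv_), onConsumer()
// registers a sound-level listener for the new custom-audio consumer and
// unmutes it; otherwise the notification is only logged.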
void RTCContext::onConsumer(uint32_t msgId, const char* roomId, const char* peerId,
        RTCENGINE_NAMESPACE::MRTCConsumerInfo& consumerInfo) {
    std::cout << "RTCContext::onConsumer():" << consumerInfo.roomId << "," << consumerInfo.displayName << ","
              << consumerInfo.channelIndex << std::endl;
    if (isRecv_) {
        std::lock_guard<std::mutex> lock(mutex_);
        std::cout << "registerSoundLevelListener" << std::endl;
        int16_t ret1 = rtcEngine_->registerSoundLevelListener(mrtc::TYPE_AUDIO_SOURCE_CUSTOM, consumerInfo.roomId,
                peerId, consumerInfo.channelIndex, this);
        if (0 != ret1) {
            std::cout << "RTCContext::instance().registerSoundLevelListener() inUser failed, ret:" << ret1 << std::endl;
            return;
        }
        std::cout << "muteAudio" << std::endl;
        int16_t ret2 = rtcEngine_->muteAudio(consumerInfo.roomId, peerId, mrtc::TYPE_AUDIO_SOURCE_CUSTOM,
                false, consumerInfo.channelIndex);
        if (0 != ret2) {
            std::cout << "RTCContext::instance().muteAudio() failed, ret:" << ret2 << std::endl;
            return;
        }
        std::cout << "init recv succ" << std::endl;
    }
}
void RTCContext::onRender(const char* roomId, const char* peerId,
        RTCENGINE_NAMESPACE::MRTCVideoSourceType sourceType, const RTCENGINE_NAMESPACE::MRTCVideoFrame& videoFrame) {
    std::cout << "RTCContext::onRender()" << std::endl;
}
void RTCContext::onCallBackMessage(uint32_t msgId, const char* msg) {
    std::lock_guard<std::mutex> lock(mutex_);
    if (msgId == (uint32_t)mrtc::JOIN_MULTI_ROOM_SUCCESS) {
        std::cout << "receive join multi room callback, msgId:" << msgId << std::endl;
        isJoinMultiRoom_ = true;
    }
    std::cout << "RTCContext::onCallBackMessage(), msgId:" << msgId << ", msg:" << msg << std::endl;
    //std::cout << "RTCContext::onCallBackMessage()" << std::endl;
}
void RTCContext::onCallBackCustomData(RTCENGINE_NAMESPACE::MRTCCustomDataObject object) {
    //std::cout << "RTCContext::onCallBackCustomData(), obj:" << object.peerId << "," << object.data << "," << object.data_length;
    std::cout << "RTCContext::onCallBackCustomData()" << std::endl;
}
void RTCContext::onSoundLevelUpdate(const char* roomId, const char* peerId, uint16_t audioSourceType,
        uint8_t channelIndex, uint16_t volume, int32_t vad)
{
    std::cout << "RTCContext::onSoundLevelUpdate()" << std::endl;
}
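// onAudioProcess() is invoked by the engine with a raw int16 PCM frame. It
// acquires the GIL, wraps the buffer in a NumPy array via np::from_data without
// copying (no owner object, so the view is only valid while the callback runs),
// and calls the registered Python callback.
//
// A minimal sketch of a matching Python-side handler (names are illustrative,
// not part of this API); it should copy the array if the data is needed after
// the call returns:
//
//   def on_audio(frame, data_size, data_count, sample_rate, num_channels, channel_index):
//       pcm = frame.copy()   # detach from the engine-owned buffer
//       ...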
void RTCContext::onAudioProcess(const char* roomId, const char* peerId,
        mrtc::MRTCAudioFrame& audioFrame,
        mrtc::MRTCAudioSourceType audioSourceType)
{
    std::cout << "=== audio processing start ===" << std::endl;
    std::cout << "audioFrame:" << audioFrame.dataCount << "," << audioFrame.sampleRate << ","
              << audioFrame.numChannels << "," << audioFrame.channelIndex << std::endl;
    // 1. Acquire the GIL
    std::cout << "[1] acquiring GIL..." << std::endl;
    if (!Py_IsInitialized()) {
        std::cerr << "Python interpreter is not initialized!" << std::endl;
        return;
    }
    PyGILState_STATE gstate = PyGILState_Ensure();
    namespace py = boost::python;
    namespace np = boost::python::numpy;
    try {
        // 2. Validate the input frame
        std::cout << "[2] validating input parameters..." << std::endl;
        std::cout << "    dataCount: " << audioFrame.dataCount
                  << " (max: " << std::numeric_limits<npy_intp>::max() << ")" << std::endl;
        if (!audioFrame.data || audioFrame.dataCount <= 0) {
            std::cout << "[ERROR] invalid audio data pointer or length" << std::endl;
            throw std::invalid_argument("Invalid audio frame data");
        }
        const size_t data_size = audioFrame.dataCount * sizeof(int16_t);
        std::cout << "step1" << std::endl;
        npy_intp shape[1] = { static_cast<npy_intp>(audioFrame.dataCount) };
        std::cout << "step2" << std::endl;
        np::dtype dtype = np::dtype::get_builtin<int16_t>();
        std::cout << "step3" << std::endl;
        // 7. Invoke the Python callback
        if (!pyCallback_.is_none()) {
            std::cout << "[7] preparing to invoke the Python callback..." << std::endl;
            // bump the refcount if the callback might be released concurrently
            //Py_INCREF(pyCallback_.ptr());
            try {
                /*
                std::cout << "    pyCallback_ type: " << Py_TYPE(pyCallback_.ptr())->tp_name << std::endl;
                PyObject* repr = PyObject_Repr(pyCallback_.ptr());
                if (repr) {
                    std::cout << "    pyCallback_ repr: " << PyUnicode_AsUTF8(repr) << std::endl;
                    Py_DECREF(repr); // must be released manually
                }
                */
                //PyGILState_STATE gstate = PyGILState_Ensure();
                np::ndarray audioArray = np::from_data(
                    audioFrame.data,                 // data pointer
                    dtype,                           // element type (int16)
                    py::make_tuple(shape[0]),        // shape (1-D)
                    py::make_tuple(sizeof(int16_t)), // strides in bytes
                    py::object()                     // owner: none, the buffer stays owned by the frame
                );
                std::cout << "    ndarray view created (no copy)" << std::endl;
                pyCallback_(
                    audioArray,                      // numpy array
                    data_size,                       // data size in bytes
                    audioFrame.dataCount,
                    audioFrame.sampleRate,
                    audioFrame.numChannels,
                    audioFrame.channelIndex
                );
                std::cout << "    after callback" << std::endl;
                if (PyErr_Occurred()) {
                    PyObject *type, *value, *traceback;
                    PyErr_Fetch(&type, &value, &traceback);
                    if (value) {
                        PyObject* str = PyObject_Str(value);
                        if (str) {
                            std::cerr << "Python Error: " << PyUnicode_AsUTF8(str) << std::endl;
                            Py_DECREF(str);
                        }
                    }
                    Py_XDECREF(type);
                    Py_XDECREF(value);
                    Py_XDECREF(traceback);
                    //PyErr_Print();
                    throw std::runtime_error("Python callback error");
                }
                std::cout << "    callback executed successfully" << std::endl;
            } catch (const py::error_already_set&) {
                std::cerr << "[PYTHON ERROR] ";
                // Optional: fetch the details first, then let PyErr_Print() emit the traceback
                if (PyErr_Occurred()) {
                    PyObject *type, *value, *traceback;
                    PyErr_Fetch(&type, &value, &traceback);
                    if (value) {
                        PyObject* str = PyObject_Str(value);
                        if (str) {
                            std::cerr << "Details: " << PyUnicode_AsUTF8(str) << std::endl;
                            Py_DECREF(str);
                        }
                    }
                    PyErr_Restore(type, value, traceback);
                }
                PyErr_Print(); // prints the traceback to stderr and clears the error
                //Py_DECREF(pyCallback_.ptr());
            } catch (...) {
                std::cout << "[ERROR] callback execution failed" << std::endl;
                //Py_DECREF(pyCallback_.ptr());
                throw;
            }
            //Py_DECREF(pyCallback_.ptr());
        } else {
            std::cout << "[7] no Python callback set" << std::endl;
        }
        // 8. Release resources
        std::cout << "[8] releasing shared memory resources..." << std::endl;
        std::cout << "[9] releasing GIL..." << std::endl;
        PyGILState_Release(gstate);
        std::cout << "=== audio processing done ===" << std::endl;
    } catch (const std::exception& e) {
        std::cout << "[EXCEPTION] caught: " << e.what() << std::endl;
        PyGILState_Release(gstate);
        std::cerr << "Audio process error: " << e.what() << std::endl;
    }
}
void RTCContext::onProducer(uint32_t msgId, mrtc::MRTCProducerInfo& info)
{
    std::cout << "-----------------------------------" << std::endl;
    std::cout << "RTCContext::onProducer()" << std::endl;
}
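// init() wires everything up in order: create the MRTC engine from the factory,
// apply the connection config (domain/appid/appSecrectKey/port are expected to
// be defined elsewhere, e.g. in the header), set the user info, join the room,
// register the room and consumer listeners, and finally start the embedded
// Python interpreter and boost::python::numpy so onAudioProcess() can call back
// into Python.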
bool RTCContext::init(const char* selfUserId, const char* selfDisplayName, const char* selfRoomId)
{
    if (!numpyApi_ || !numpyApi_[93]) { // index 93 is the PyArray_SimpleNew slot in the NumPy C-API table
        std::cout << "numpyApi_ is null in init" << std::endl;
    } else {
        std::cout << "init, numpyApi_[93]:" << numpyApi_[93] << std::endl;
    }
    mrtc::IMRTCEngineFactory* rtcFactory = mrtc::getMRTCEngineFactory();
    if (!rtcFactory)
    {
        return false;
    }
    rtcEngine_ = rtcFactory->produceMRTCEngine();
    if (!rtcEngine_)
    {
        return false;
    }
    mrtc::MRTCEngineConfig engineConfig;
    strcpy(engineConfig.domain, domain);
    strcpy(engineConfig.applicationId, appid);
    strcpy(engineConfig.appSecrectKey, appSecrectKey);
    engineConfig.port = port;
    if (0 != rtcEngine_->init(engineConfig, this))
    {
        std::cout << "RTCContext::instance().init() failed" << std::endl;
        return false;
    }
    if (0 != rtcEngine_->setUserInfo(selfUserId, selfDisplayName, selfRoomId))
    {
        std::cout << "RTCContext::instance().setUserInfo() failed" << std::endl;
        return false;
    }
    mrtc::MRTCJoinAuthority authority;
    strcpy(authority.applicationId, appid);
    strcpy(authority.appSecretKey, appSecrectKey);
    mrtc::MRTCJoinConfig loginConfig;
    if (0 != rtcEngine_->joinRoom(authority, loginConfig))
    {
        std::cout << "RTCContext::instance().joinRoom() failed" << std::endl;
        return false;
    }
    if (0 != rtcEngine_->registerListener(mrtc::MRTCListenerType::TYPE_LISTENER_ROOM, this))
    {
        std::cout << "RTCContext::instance().registerListener(TYPE_LISTENER_ROOM) failed" << std::endl;
        return false;
    }
    if (0 != rtcEngine_->registerListener(mrtc::MRTCListenerType::TYPE_LISTENER_CONSUMER, this))
    {
        std::cout << "RTCContext::instance().registerListener(TYPE_LISTENER_CONSUMER) failed" << std::endl;
        return false;
    }
    namespace py = boost::python;
    namespace np = boost::python::numpy;
    Py_Initialize();  // initialize the embedded Python interpreter
    np::initialize(); // initialize boost::python::numpy
    return true;
}
bool RTCContext::initRecv(const char* destRoomId, const char* srcUserId, const int16_t destChannelIndex)
{
    isRecv_ = true;
    return true;
}
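// initSend() busy-waits (3-second sleeps) until onRoom() has fired, joins a
// second room when the destination room differs from the source room, and then
// starts the custom audio producer on the requested channel.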
bool RTCContext::initSend(const char* srcRoomId, const char* destRoomId, const int16_t destChannelIndex, uint8_t channelNum)
{
    while (!isOnRoom_)
    {
        std::cout << "wait for OnRoom" << std::endl;
        sleep(3);
    }
    if (std::string(srcRoomId) != std::string(destRoomId)) {
        isMultiRoom_ = true;
        std::cout << "join multi room" << std::endl;
        int16_t ret1 = rtcEngine_->joinMultiRoom(destRoomId);
        if (ret1 != 0)
        {
            std::cout << "joinMultiRoom fail, ret:" << ret1 << std::endl;
            return false;
        }
    } else {
        isMultiRoom_ = false;
    }
    mrtc::MRTCAudioOption option;
    option.channel = channelNum;
    if (isMultiRoom_) {
        strcpy(option.dstRoomId, destRoomId);
    }
    option.channelIndex = destChannelIndex;
    std::cout << "startCustomAudio" << std::endl;
    int16_t ret2 = rtcEngine_->startCustomAudio(option);
    if (ret2 != 0)
    {
        std::cout << "startCustomAudio fail, ret:" << ret2 << std::endl;
        return false;
    }
    std::cout << "init send succ" << std::endl;
    return true;
}
void RTCContext::destorySend(const int16_t selfChannelIndex)
{
    rtcEngine_->stopCustomAudio(selfChannelIndex);
}
int16_t RTCContext::sendAudioData(uint8_t channelIndex, const void* pData, int32_t nSampleRate, uint64_t nNumberOfChannels,
        uint64_t dataLength)
{
    std::lock_guard<std::mutex> lock(mutex_);
    if (pData_)
    {
        return rtcEngine_->sendCustomAudioData(channelIndex, pData, nSampleRate, nNumberOfChannels, dataLength);
    }
    return 0;
}
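// sendCustomAudioData() blocks (3-second sleeps) until the room is joined and,
// for cross-room sends, until the multi-room join callback has arrived, then
// forwards the PCM buffer to the engine.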
int16_t RTCContext::sendCustomAudioData(const int16_t channelIndex, void* customData, int32_t sampleRate,
        uint64_t channelNum, uint64_t dataLen)
{
    while (!isOnRoom_ || (isMultiRoom_ && !isJoinMultiRoom_)) {
        std::cout << "wait for room and multi room before send" << std::endl;
        sleep(3);
    }
    std::lock_guard<std::mutex> lock(mutex_);
    if (customData == nullptr) {
        std::cout << "customData is null" << std::endl;
        return -1;
    }
    std::cout << "customData addr is:" << customData << std::endl;
    return rtcEngine_->sendCustomAudioData(channelIndex, customData, sampleRate,
            channelNum, dataLen);
}
mrtc::IMRTCEngine* RTCContext::getRtcEngine() const
{
    std::lock_guard<std::mutex> lock(mutex_);
    return rtcEngine_;
}
void* RTCContext::getpData() const
{
    std::lock_guard<std::mutex> lock(mutex_);
    return pData_;
}
void RTCContext::setpData(void* pData)
{
    std::lock_guard<std::mutex> lock(mutex_);
    pData_ = pData;
}
void RTCContext::setPyCallback(boost::python::object callback) {
    std::lock_guard<std::mutex> lock(mutex_);
    pyCallback_ = callback;
}
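// setNumpyApi() stores the NumPy C-API table, presumably handed over from the
// Python binding layer; index 93 (the PyArray_SimpleNew slot, per the comment
// in init()) is printed purely as a sanity check.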
void RTCContext::setNumpyApi(void** numpyApi) {
    std::lock_guard<std::mutex> lock(mutex_);
    numpyApi_ = numpyApi;
    if (numpyApi_) {
        std::cout << "setNumpyApi, numpyApi_[93]:" << numpyApi_[93] << std::endl;
    } else {
        std::cout << "setNumpyApi, numpyApi_ is null" << std::endl;
    }
}