Planet
navi: home | PPS | about | screenshots | download | development | forum

Changeset 1491


Ignore:
Timestamp:
May 31, 2008, 11:48:01 AM (16 years ago)
Author:
scheusso
Message:

enet is not thread-safe (caught that now); some first steps towards a dedicated server

Location:
code/branches/network
Files:
7 edited

Legend:

Unmodified
Added
Removed
  • code/branches/network/TODO

    r1432 r1491  
     1- should we use enet_peer_ping to test if a client is still alive ?
     2- enet_host_broadcast ? (to all peers)
     3- enet_host_check_events ? instead of enet_host_service
  • code/branches/network/src/network/ClientConnection.cc

    r1409 r1491  
    5050{
    5151  //static boost::thread_group network_threads;
     52
     53  boost::recursive_mutex ClientConnection::enet_mutex_;
    5254
    5355  ClientConnection::ClientConnection(int port, std::string address) {
     
    124126      return false;
    125127    }
     128    boost::recursive_mutex::scoped_lock lock(enet_mutex_);
    126129    if(enet_peer_send(server, 0, packet)<0)
    127130      return false;
     
    143146    if(server==NULL)
    144147      return false;
     148    boost::recursive_mutex::scoped_lock lock(enet_mutex_);
    145149    enet_host_flush(client);
    146150    return true;
     
    149153  void ClientConnection::receiverThread() {
    150154    // what about some error-handling here ?
    151     enet_initialize();
    152155    atexit(enet_deinitialize);
    153156    ENetEvent *event;
    154     client = enet_host_create(NULL, NETWORK_CLIENT_MAX_CONNECTIONS, 0, 0);
     157    {
     158      boost::recursive_mutex::scoped_lock lock(enet_mutex_);
     159      enet_initialize();
     160      client = enet_host_create(NULL, NETWORK_CLIENT_MAX_CONNECTIONS, 0, 0);
     161    }
    155162    if(client==NULL) {
    156163      COUT(2) << "ClientConnection: could not create client host" << std::endl;
     
    168175      event = new ENetEvent;
    169176      //std::cout << "connection loop" << std::endl;
    170       if(enet_host_service(client, event, NETWORK_CLIENT_TIMEOUT)<0){
    171         // we should never reach this point
    172         quit=true;
    173         // add some error handling here ========================
     177      {
     178        boost::recursive_mutex::scoped_lock lock(enet_mutex_);
     179        if(enet_host_service(client, event, NETWORK_CLIENT_TIMEOUT)<0){
     180          // we should never reach this point
     181          quit=true;
     182          continue;
     183          // add some error handling here ========================
     184        }
    174185      }
    175186      switch(event->type){
     
    196207    if(!disconnectConnection())
    197208      // if disconnecting failed destroy conn.
     209      boost::recursive_mutex::scoped_lock lock(enet_mutex_);
    198210      enet_peer_reset(server);
    199211    return;
     
    202214  bool ClientConnection::disconnectConnection() {
    203215    ENetEvent event;
     216    boost::recursive_mutex::scoped_lock lock(enet_mutex_);
    204217    enet_peer_disconnect(server, 0);
    205218    while(enet_host_service(client, &event, NETWORK_CLIENT_TIMEOUT) > 0){
     
    222235    ENetEvent event;
    223236    // connect to peer (server is type ENetPeer*)
     237    boost::recursive_mutex::scoped_lock lock(enet_mutex_);
    224238    server = enet_host_connect(client, &serverAddress, NETWORK_CLIENT_CHANNELS);
    225239    if(server==NULL) {
  • code/branches/network/src/network/ClientConnection.h

    r1443 r1491  
    9696    ENetPeer *server;
    9797    boost::thread *receiverThread_;
     98   
     99    static boost::recursive_mutex enet_mutex_;
    98100  };
    99101
  • code/branches/network/src/network/ConnectionManager.cc

    r1409 r1491  
    6565 
    6666  ConnectionManager::ConnectionManager():receiverThread_(0){}
     67  boost::recursive_mutex ConnectionManager::enet_mutex_;
    6768 
    6869  ConnectionManager::ConnectionManager(ClientInformation *head) : receiverThread_(0) {
     
    143144    if(!temp)
    144145      return false;
     146    boost::recursive_mutex::scoped_lock lock(enet_mutex_);
    145147    if(enet_peer_send(peer, (enet_uint8)temp->getID() , packet)!=0)
    146148      return false;
     
    152154    if(!temp)
    153155      return false;
     156    boost::recursive_mutex::scoped_lock lock(enet_mutex_);
    154157    if(enet_peer_send(temp->getPeer(), (enet_uint8)clientID, packet)!=0)
    155158      return false;
     
    158161
    159162  bool ConnectionManager::addPacketAll(ENetPacket *packet) {
     163    boost::recursive_mutex::scoped_lock lock(enet_mutex_);
    160164    for(ClientInformation *i=head_->next(); i!=0; i=i->next()){
    161165      if(enet_peer_send(i->getPeer(), (enet_uint8)i->getID(), packet)!=0)
     
    177181    if(server==NULL)
    178182      return false;
     183    boost::recursive_mutex::scoped_lock lock(enet_mutex_);
    179184    enet_host_flush(server);
    180185    return true;
     
    184189    // what about some error-handling here ?
    185190    ENetEvent *event;
    186     enet_initialize();
    187191    atexit(enet_deinitialize);
    188     server = enet_host_create(&bindAddress, NETWORK_MAX_CONNECTIONS, 0, 0);
     192    { //scope of the mutex
     193      boost::recursive_mutex::scoped_lock lock(enet_mutex_);
     194      enet_initialize();
     195      server = enet_host_create(&bindAddress, NETWORK_MAX_CONNECTIONS, 0, 0);
     196    }
    189197    if(server==NULL){
    190198      // add some error handling here ==========================
     
    195203    while(!quit){
    196204      event = new ENetEvent;
    197       if(enet_host_service(server, event, NETWORK_WAIT_TIMEOUT)<0){
    198         // we should never reach this point
    199         quit=true;
    200         // add some error handling here ========================
     205      { //mutex scope
     206        boost::recursive_mutex::scoped_lock lock(enet_mutex_);
     207        if(enet_host_service(server, event, NETWORK_WAIT_TIMEOUT)<0){
     208          // we should never reach this point
     209          quit=true;
     210          continue;
     211          // add some error handling here ========================
     212        }
    201213      }
    202214      switch(event->type){
     
    231243    disconnectClients();
    232244    // if we're finishied, destroy server
    233     enet_host_destroy(server);
     245    {
     246      boost::recursive_mutex::scoped_lock lock(enet_mutex_);
     247      enet_host_destroy(server);
     248    }
    234249  }
    235250 
     
    241256    ClientInformation *temp = head_->next();
    242257    while(temp!=0){
    243       enet_peer_disconnect(temp->getPeer(), 0);
     258      {
     259        boost::recursive_mutex::scoped_lock lock(enet_mutex_);
     260        enet_peer_disconnect(temp->getPeer(), 0);
     261      }
    244262      temp = temp->next();
    245263    }
    246264    //bugfix: might be the reason why server crashes when clients disconnects
    247     //temp = temp->next();
    248265    temp = head_->next();
    249     while( temp!=0 && enet_host_service(server, &event, NETWORK_WAIT_TIMEOUT) > 0){
     266    boost::recursive_mutex::scoped_lock lock(enet_mutex_);
     267    while( temp!=0 && enet_host_service(server, &event, NETWORK_WAIT_TIMEOUT) >= 0){
    250268      switch (event.type)
    251269      {
     
    273291  }
    274292
    275   /*bool ConnectionManager::clientDisconnect(ENetPeer *peer) {
    276     COUT(4) << "removing client from list" << std::endl;
    277     return removeClient(head_->findClient(&(peer->address))->getID());
    278   }*/
    279293/**
    280294This function adds a client that connects to the clientlist of the server
     
    422436 
    423437  void ConnectionManager::disconnectClient(ClientInformation *client){
    424     enet_peer_disconnect(client->getPeer(), 0);
     438    {
     439      boost::recursive_mutex::scoped_lock lock(enet_mutex_);
     440      enet_peer_disconnect(client->getPeer(), 0);
     441    }
    425442    removeShip(client);
    426443  }
  • code/branches/network/src/network/ConnectionManager.h

    r1409 r1491  
    4848#include <enet/enet.h>
    4949#include <boost/thread/thread.hpp>
     50#include <boost/thread/recursive_mutex.hpp>
    5051
    5152#include "PacketBuffer.h"
     
    116117
    117118    boost::thread *receiverThread_;
     119    static boost::recursive_mutex enet_mutex_;
    118120//     int getNumberOfClients();
    119121    //functions to map what object every clients uses
  • code/branches/network/src/orxonox/Orxonox.cc

    r1466 r1491  
    186186    else if (mode == "server")
    187187      mode_ = SERVER;
     188    else if (mode == "dedicated")
     189      mode_= DEDICATED;
    188190    else
    189191    {
     
    215217  bool Orxonox::start()
    216218  {
    217     //if (mode == DEDICATED)
     219    //if (mode_ == DEDICATED)
    218220    // do something else
    219221    //else
     
    433435      // don't forget to call _fireFrameStarted in ogre to make sure
    434436      // everything goes smoothly
    435       ogreRoot._fireFrameStarted(evt);
     437      if(mode_!=DEDICATED)
     438        ogreRoot._fireFrameStarted(evt);
    436439
    437440      // get current time
     
    439442      calculateEventTime(now, eventTimes[2]);
    440443
    441       ogreRoot._updateAllRenderTargets(); // only render in non-server mode
     444      if(mode_!=DEDICATED)
     445        ogreRoot._updateAllRenderTargets(); // only render in non-server mode
    442446
    443447      // get current time
     
    449453
    450454      // again, just to be sure ogre works fine
    451       ogreRoot._fireFrameEnded(evt);
     455      if(mode_!=DEDICATED)
     456        ogreRoot._fireFrameEnded(evt);
    452457          }
    453458
  • code/branches/network/src/orxonox/Orxonox.h

    r1446 r1491  
    5050    SERVER,
    5151    CLIENT,
    52     STANDALONE
     52    STANDALONE,
     53    DEDICATED
    5354  };
    5455
Note: See TracChangeset for help on using the changeset viewer.