Usuario:Muro Bot/Scripts/comandos

# -*- coding: utf8 -*-
# Reverts the sandbox every hour
import wikipedia, os, urllib, thread, catlib, re, time
import editarticle
import urllib2
#from pagetext import pageText
from time import sleep
from category import *
import pagegenerators
#import vivoredirect
#import borradomasivo
 
def pageText(url):
        request=urllib2.Request(url)
        user_agent='Mozilla/5.0 (Windows; U; Windows NT 5.1; en-US; rv:1.7.12) Gecko/20050915 Firefox/1.0.7'
        wikipedia.output (url)
        request.add_header("User-Agent", user_agent)
        response=urllib2.urlopen(request)
        text=response.read()
        response.close()
        return text
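# Usage sketch (URL illustrative): fetch the raw HTML body of a page.
#   html = pageText('http://es.wikipedia.org/wiki/Portada')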
 
 
editing=[] # guard against edit conflicts / self-reverts
 
salidaprograma=False
 
def getrefs(page):
    tch=wikipedia.Page(wikipedia.getSite(), page)
    refs=[]
    if tch.namespace() == 6: # for images the method is different...
        imgpg=pageText('http://es.wikipedia.org/wiki/'+tch.urlname()).decode('utf-8')
        chop=imgpg.split('<h2 id="filelinks">Enlaces a la imagen</h2>')[1].split('</ul>')[0]
        tabla=chop.split('title="')
        tabla.remove(tabla[0])
        for cerca in tabla:
                name=cerca.split('"')[0]
                if not re.search(ur'^Especial:', name):
                        refs.append(name)
 
    else:
 
        a=wikipedia.Page(wikipedia.getSite(), page).urlname() # converts to URL format, e.g. "Plantilla: en icon" → "Plantilla%3AEn_icon"
        crudo=pageText('http://es.wikipedia.org/w/index.php?title=Special:Whatlinkshere/'+a+'&limit=5000&from=0')
        crudo=crudo.split('<!-- content -->')[1].split('<!-- /content -->')[0].decode('utf-8')
        tabla=crudo.split('<li>')
        tabla.remove(tabla[0])
        for cerca in tabla:
                name=cerca.split('title="')[1].split('"')[0]
                if not re.search(ur'^Especial:', name):
                        refs.append(name)
    return refs
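# Usage sketch (page name illustrative): collect the titles of pages linking to
# a page by scraping the rendered Special:Whatlinkshere HTML (or the file-links
# section for images):
#   refs = getrefs(u'Plantilla:Wikificar')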
 
def getincrefs(page):
    tch=wikipedia.Page(wikipedia.getSite(), page)
    refs=[]
    a=wikipedia.Page(wikipedia.getSite(), page).urlname() # converts to URL format, e.g. "Plantilla: en icon" → "Plantilla%3AEn_icon"
    crudo=pageText('http://es.wikipedia.org/w/index.php?title=Special:Whatlinkshere/'+a+'&hidelinks=1&hideredirs=1&limit=5000&from=0')
    crudo=crudo.split('<!-- content -->')[1].split('<!-- /content -->')[0].decode('utf-8')
    tabla=crudo.split('<li>')
    tabla.remove(tabla[0])
    for cerca in tabla:
        name=cerca.split('title="')[1].split('"')[0]
        if not re.search(ur'^Especial:', name):
                        refs.append(name)
    return refs
 
 
def unescape(lines):
    lines=''.join(lines)
    lines = lines.replace('&gt;', '>')
    lines = lines.replace('&lt;', '<')
    lines = lines.replace('&quot;', '"')
    lines = lines.replace('&amp;', '&')
    return lines
 

def escape (R):
    R=re.sub('[ _]', '[_ ]', R)
    exc=['.', '(', ')']
    for ex in exc:
        R=R.replace(ex, '\\'+ex)
    return R
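# What escape() produces, on an illustrative input: spaces and underscores
# become the class [_ ], and '.', '(', ')' are backslash-escaped so the result
# can be embedded in a regex:
#   escape(u'Premios (2007).jpg')  ->  u'Premios[_ ]\(2007\)\.jpg'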
 
def trazar(txt):
        try:
            tp=wikipedia.Page(wikipedia.getSite(), u'User:Muro Bot/Comandos/traza')
            tptext=tp.get()
            tp.put(tptext+'\n*'+txt, u'Problema: '+txt)
        except wikipedia.NoPage:
            wikipedia.output('No existe /traza')
 
 
##Completely eliminate an image from usage
def retirar_imagen(img):
    for link in getrefs('File:'+img):
        link=link.split('"')[0]
        ll=wikipedia.Page(wikipedia.getSite(), link)
        if ll.namespace()==0:
            while ll in editing:
                time.sleep(1)
            editing.append(ll)
            txt=ll.get()
            rx='\[\[[iI]magen?: *['+img[0].upper()+img[0].lower()+']'+img[1:]
            rx=escape(rx)
            posis=re.findall(rx, txt)
            for posi in posis:
                alimento=[]
                lolly=txt.split(posi)[1]
                pop=lolly.split(']]')[0]
                #is it simple?
                if not '[[' in pop:
                    newTxt=txt.replace(posi+pop+']]', '')
                    if newTxt != txt:
                        try:
                            ll.put(newTxt, u'[[User:Muro Bot/Comandos|Bot]]: Retirando imagen «'+img+u'»')
                        except wikipedia.LockedPage:
                            continue
                #or is it hard?
                else:
                    tabla=lolly.split('[[')
                    for board in tabla:
                        alimento.append('[['+board)
                        if board.count(']]')>1:
                            break
                    #wikipedia.output (alimento)
                    #wikipedia.output (alimento[0])
                    alimento[0]=alimento[0].replace('[[', '')
                    last=alimento[-1].split(']]')
                    last.remove(last[-1])
                    last=']]'.join(last)+']]'
                    finalrex=posi+''.join(alimento[:-1])+last
                    #wikipedia.output (finalrex in txt)
                    newTxt=txt.replace(finalrex, '')
                    #wikipedia.output ([txt])
                    #wikipedia.output ([finalrex])
                    if txt != newTxt:
                        try:
                            ll.put(newTxt, u'[[Usuario:Muro Bot/Comandos|Bot]]: Retirando imagen «'+img+u'»')
                        except wikipedia.LockedPage:
                            continue
            while ll in editing:
                editing.remove(ll)
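# Usage sketch (file name illustrative, without namespace prefix):
#   retirar_imagen(u'Copyvio.jpg')  # strips every [[Imagen:Copyvio.jpg|...]] inclusion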
def replaceimage(link, imgR, img2, img1):
        ll=wikipedia.Page(wikipedia.getSite(), link)
        while ll in editing:
            time.sleep(1)
        editing.append(ll)
        txt=ll.get()
        newTxt=re.sub(imgR, img2, txt)
        #wikipedia.output (imgR)
        if newTxt != txt:
            wikipedia.output (u'Reemplazando '+img1+' por '+img2+' en [['+link+']]')
            ll.put(newTxt, u'[[User:Muro Bot/Comandos|Bot]]: Reemplazando '+img1+' por '+img2)
        else:
            ll.put(newTxt, '')
        while ll in editing:
            editing.remove(ll)
 
##Replace one image with another
def reemplazar_imagen(img1, img2):
    R='['+img1[0].upper()+img1[0].lower()+']'+img1[1:]
    R=escape(R)

    for link in getrefs('Image:'+img1):
    #start a new thread so an error doesn't screw up the whole thing
        thread.start_new_thread(replaceimage, (link, R, img2, img1))
        time.sleep(.3)
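# Usage sketch (file names illustrative); each page edit runs in its own thread:
#   reemplazar_imagen(u'Bandera vieja.svg', u'Bandera nueva.svg')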
 
##Bulletins/newsletters
def boletin(msg, userS):
    mm=wikipedia.Page(wikipedia.getSite(), msg)
    messageText=mm.get()
    uu=wikipedia.Page(wikipedia.getSite(), userS)
    while uu in editing:
        time.sleep(1)
    editing.append(uu)
    poslist=uu.get().split('\n')
    a=True
    users=[]
    for user in poslist:
        if user[0:3]=='<!-': # stop collecting at the first HTML comment
            a=False
        elif user=='':
            a=False
        if a:
            users.append(user)
    for user in users:
        try:
            tp=wikipedia.Page(wikipedia.getSite(), 'User Talk:'+user)
            tptext=tp.get()
            if messageText not in tptext:
                tp.put(tptext+'\n\n'+messageText, u'[[User:Muro Bot/Comandos|Bot]]: Boletín')
        except wikipedia.NoPage:
            continue
        except wikipedia.SpamfilterError:
            trazar(u'Bloqueo por Spam Filter en [[Usuario Discusión:'+user+u']]')            
            wikipedia.output (u'¡ATENCIÓN! Bloqueo por Spam Filter en [[Usuario Discusión:'+user+u']]')
            continue
        except wikipedia.IsRedirectPage:
            trazar(u'[[Usuario Discusión:'+user+u']] es una redirección')
            wikipedia.output (u'¡ATENCIÓN! [[Usuario Discusión:'+user+u']] es una redirección')
            continue
        except wikipedia.LockedPage:
            trazar(u'[[Usuario Discusión:'+user+u']] está protegida')            
            wikipedia.output (u'¡ATENCIÓN! [[Usuario Discusión:'+user+u']] está protegida')
            continue
    while uu in editing:
        editing.remove(uu)
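# Usage sketch: `msg` is a page holding the bulletin wikitext and `userS` a page
# listing one subscriber per line (titles illustrative):
#   boletin(u'Usuario:Muro Bot/Boletines/Actual', u'Wikiproyecto:Ejemplo/Suscriptores')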
 
def cambiar(txtold, txtnew):

    R='\[\[ *['+txtold[0].upper()+txtold[0].lower()+']'+txtold[1:]
    R=escape(R) # escape metacharacters, as is already done for Rl/Ru below
    tol=txtold[0].lower()+txtold[1:]
    tou=txtold[0].upper()+txtold[1:]
    Rl='\[\[ *'+tol
    Rl=escape(Rl)
    Rlc=Rl+'\]\]'
    Ru='\[\[ *'+tou
    Ru=escape(Ru)
    Ruc=Ru+'\]\]'
    newlink='[['+txtnew
    tnl=txtnew[0].lower()+txtnew[1:]
    tnu=txtnew[0].upper()+txtnew[1:]
    R2u='\[\['+txtnew+'\|'+tnu+'\]\]'
    R2u=escape(R2u)
    R2l='\[\['+txtnew+'\|'+tnl+'\]\]'
    R2l=escape(R2l)

    for tit in getrefs(txtold):
        tt=wikipedia.Page(wikipedia.getSite(), tit)
        wikipedia.output (u'Buscando [['+txtold+u']] en [['+tit+u']] para cambiarlo por [['+txtnew+u']]')
        try:
            while tt in editing:
                time.sleep(1)
            editing.append(tt)
            old=tt.get()
            new=re.sub(Rlc,newlink+'|'+tol+']]',old)
            new=re.sub(Ruc,newlink+'|'+tou+']]',new)
            new=re.sub(R,newlink,new)
            new=re.sub(R2u,'[['+tnu+']]',new)
            new=re.sub(R2l,'[['+tnl+']]',new)
            if old != new:
                wikipedia.output (u'Cambiando enlaces a «'+txtold+u'» por «'+txtnew+u'» en [['+tit+u']]')
                tt.put(new, u'[[User:Muro Bot/Comandos|Bot]]: Reemplazando [['+txtold+u']] por [['+txtnew+']]')
            while tt in editing:
                editing.remove(tt)
        except wikipedia.NoPage:
            continue
        except wikipedia.SpamfilterError:
            trazar(u'Bloqueo por Spam Filter en [['+tit+u']]')            
            wikipedia.output (u'¡ATENCIÓN! Bloqueo por Spam Filter en [['+tit+u']]')
            continue
        except wikipedia.IsRedirectPage:
            trazar(u'[['+tit+u']] es una redirección')
            wikipedia.output (u'¡ATENCIÓN! [['+tit+u']] es una redirección')
            continue
        except wikipedia.LockedPage:
            trazar(u'[['+tit+u']] está protegida')            
            wikipedia.output (u'¡ATENCIÓN! [['+tit+u']] está protegida')
            continue
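# Usage sketch: retarget wikilinks after a page move (titles illustrative):
#   cambiar(u'Título antiguo', u'Título nuevo')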
 
 
 
### For redundant categories
def redund(page):
    toremove=[]
    pl=wikipedia.Page(wikipedia.getSite(), page)
    while pl in editing:
        time.sleep(1)
    editing.append(pl)
    old=pl.get()
    new=old
    categs=pl.categories()
    for categ in categs:
        supers=categ.supercategories()
        for Super in supers:
            if Super in categs:
                #wikipedia.output (Super.title()+" is in ")
                toremove.append(Super)
    for cat in toremove:
        tittat=cat.title().split(':')
        tit=''.join(tittat[1:])
        tit=escape(tit)
        rex='\[\[[cC]ategor(.a|y): *['+tit[0].lower()+tit[0].upper()+']'+tit[1:]+'\]\]'
        wikipedia.output ([rex])
        new=re.sub(re.compile(rex, re.UNICODE), '', new) # the 4th positional argument of re.sub is a count, not flags
    if new != old: # save once, after every redundant category has been stripped
        wikipedia.output (" "*len(pl.title())+"REDUNDANT")
        pl.put(new, u'[[User:Muro Bot/Comandos|Bot]]: Eliminando categorías redundantes')
    while pl in editing:
        editing.remove(pl)
def feeder(cat):
    cat=catlib.Category(wikipedia.getSite(), 'Category:'+cat)
    articulos=cat.articles()
    for articulo in articulos:
        redund(articulo.title())
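# Usage sketch (category name illustrative): feeder() walks a category and, via
# redund(), strips from each member any category that is a direct parent of
# another category the page already carries:
#   feeder(u'Localidades de España')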
##Catstring
def sacacarne(art):
    inters = wikipedia.getLanguageLinks(art, insite = wikipedia.getSite())
    indices=[]
    wikipedia.output (inters)
    categories = wikipedia.getCategoryLinks(art, site = wikipedia.getSite())
    catR='\[\[ *categor.a?:.*\]\]'
    wikipedia.output (catR)
    tabla=re.findall(catR, art.lower())
    catInd=art.lower().index(tabla[0])
    indices.append(catInd)
    for inter in inters:
        lookinF=inters[inter].aslink().lower()
        try:
            indice=art.lower().index(lookinF)
            indices.append(indice)
        except ValueError:
            continue
    wikipedia.output (indices)
    indices.sort()
    carne=art[:indices[0]]
    cola=art[indices[0]:]
    return [carne, cola]
 
def catstring(category, string):
    editados=[]
    cat=catlib.Category(wikipedia.getSite(), 'Category:'+category)
    articulos=cat.articles()
    for articulo in articulos:
        if articulo.title().lower() in editados:
            continue # already handled; removing items mid-iteration would skip entries
        editados.append(articulo.title().lower())
        while articulo in editing:
            time.sleep(1)
        editing.append(articulo)
        txt=articulo.get()
        if string.lower() not in txt.lower():
            sacao=sacacarne(txt)
            nuevo=sacao[0]+string+'\n\n'+sacao[1]
            if txt != nuevo:
                articulo.put(nuevo, u'[[User:Muro Bot/Comandos|Bot]]: Introducción automática de una plantilla *en fase beta*')
        while articulo in editing:
            editing.remove(articulo) # release the lock even if no edit was made
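# Usage sketch (values illustrative): append a template to every article of a
# category that does not already contain it:
#   catstring(u'Municipios de Ávila', u'{{esbozo de|geografía}}')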
 
## Messes with incorrect esbozos de|...
def cambiaparametros(old, new):
    cat=catlib.Category(wikipedia.getSite(), u'Category:Wikipedia:Esbozo '+old)
    #todos=cat._make_catlist()[0]
    gen = pagegenerators.CategorizedPageGenerator(cat)
    todos = pagegenerators.PreloadingGenerator(gen, pageNumber = 100)
 
    for todo in todos:
        #while pl in editing:
        #    time.sleep(1)
        #editing.append(pl)
        pl=todo # the generator already yields Page objects
        txt=unescape(pageText('http://es.wikipedia.org/wiki/Special:Export/'+pl.urlname()))
        txt=txt.split('<text')[1].split('>')[1].split('</text>')[0].decode('utf-8')
        rex=ur'\{\{((?:[mM]ini)?[eE]sbozo[ _]de\|)'+old+'\}\}'
        wikipedia.output ([rex])
        tablota=re.sub(rex, '{{'+ur'\1'+new+'}}',  txt).split('</text')[0]
        if tablota != txt:
            pl.put(tablota, u'[[User:Muro Bot/Comandos|Bot]]: Arreglando esbozos de '+old)
        while pl in editing:
            editing.remove(pl)
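# Usage sketch (parameter values illustrative): rewrite {{esbozo de|old}} as
# {{esbozo de|new}} across the stub-tracking category:
#   cambiaparametros(u'escritores', u'escritor')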
 
 
 
 
def moveplantilla(old, new):
#    tch=wikipedia.Page(wikipedia.getSite(), old)
#    if tch.namespace() == 10:
#        old1=tch.titleWithoutNamespace()
#    else:
#        old1=old
#    tch=wikipedia.Page(wikipedia.getSite(), new)
#    if tch.namespace() == 10:
#        new1=tch.titleWithoutNamespace()
#    else:
#        new1=new
    old1='Template:'+old
    for tit in getincrefs(old1):
        try:
            pl=wikipedia.Page(wikipedia.Site('es', 'wikipedia'), tit)
            while pl in editing:
                time.sleep(1)
            editing.append(pl)
            old2=escape(old)
            rex=ur'(\{\{((?:[pP]lantilla|[tT]emplate|[mM]sg)?:? *)?['+old2[0].upper()+old2[0].lower()+']'+old2[1:]+')'
            txt=pl.get()
            newTxt=re.sub(rex, u"{{"+new,txt)
            if (newTxt != txt):
                    wikipedia.output (u'Trasladando {{'+old+'}} a {{'+new+'}} en [['+tit+']]')
                    pl.put(newTxt, u'[[User:Muro Bot/Comandos|Bot]]: Trasladando {{'+old+'}} a {{'+new+'}}')
            while pl in editing:
                editing.remove(pl)
        except wikipedia.NoPage:
            continue
        except wikipedia.SpamfilterError:
            trazar(u'Bloqueo por Spam Filter en [['+tit+u']]')            
            wikipedia.output (u'¡ATENCIÓN! Bloqueo por Spam Filter en [['+tit+u']]')
            continue
        except wikipedia.IsRedirectPage:
            trazar(u'[['+tit+u']] es una redirección')
            wikipedia.output (u'¡ATENCIÓN! [['+tit+u']] es una redirección')
            continue
        except wikipedia.LockedPage:
            trazar(u'[['+tit+u']] está protegida')            
            wikipedia.output (u'¡ATENCIÓN! [['+tit+u']] está protegida')
            continue
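# Usage sketch (template names illustrative, given without the "Plantilla:"
# prefix): repoint transclusions after a template rename:
#   moveplantilla(u'Nombre viejo', u'Nombre nuevo')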
 
 
 
 
def retiraplantillas(temp):
    R=escape(temp) 
    tR='\{\{(?:(?:[Pp]lantilla|[Tt]emplate): *)?['+R[0].upper()+R[0].lower()+']'
    tR+=R[1:]+ur'[ _]*(?:\||\}\})'
    tl='Template:'+temp
    for tit in getincrefs(tl):
        tt=wikipedia.Page(wikipedia.getSite(), tit)
        wikipedia.output ('Buscando {{'+temp+'}} en [['+tit+']] para retirarla')
        try:
            while tt in editing:
                time.sleep(1)
            editing.append(tt)
            old=tt.get()
            new=old
            encontrados=re.findall(tR, new)
            toreplace=[]
            for encontrado in encontrados:
                simple=0
                complete=0
                reestructura=[encontrado]
                chuleta=new.split(encontrado)[1]
                if encontrado[-2:]=='}}':
                    simple=1
                if simple==1:
                    toreplace.append(reestructura[0])
                    continue
                cortes=chuleta.split('}}')
                for corte in cortes:
                    wikipedia.output ('r:'+str(reestructura))
                    if not '{{' in corte:
                        complete=1
                    reestructura.append(corte+'}}')
                    if complete==1:
                        toreplace.append(''.join(reestructura))
                        break
            for tr in toreplace:
                new=new.replace(tr, '')
 
 
            if old != new:
                wikipedia.output (u'Retirando {{'+temp+u'}} de [['+tit+']]')
                tt.put(new, u'Retirando {{'+temp+u'}}')
            while tt in editing:
                editing.remove(tt)
        except wikipedia.NoPage:
            continue
        except wikipedia.LockedPage:
            trazar(u'[['+tit+u']] está protegida')            
            wikipedia.output (u'¡ATENCIÓN! [['+tit+u']] está protegida')
            continue
        except wikipedia.SpamfilterError:
            trazar(u'Bloqueo por Spam Filter en [['+tit+u']]')            
            wikipedia.output (u'¡ATENCIÓN! Bloqueo por Spam Filter en [['+tit+u']]')
            continue
        except wikipedia.IsRedirectPage:
            trazar(u'[['+tit+u']] es una redirección')
            wikipedia.output (u'¡ATENCIÓN! [['+tit+u']] es una redirección')
            continue
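# Usage sketch (template name illustrative): removes every transclusion,
# including calls that carry parameters or nested templates:
#   retiraplantillas(u'Plantilla obsoleta')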
 
def sustituyeplantillas(template):
    R=escape(template)
    R='\{\{(?:(?:[Pp]lantilla|[Tt]emplate): *)?['+R[0].upper()+R[0].lower()+']'+R[1:]
    #wikipedia.output (R)
    tl='Template:'+template
    for tit in getincrefs(tl):
        tt=wikipedia.Page(wikipedia.getSite(), tit)
        wikipedia.output (u'Buscando {{'+template+u'}} en [['+tit+u']] para subst:ituirla')
        try:
            while tt in editing:
                time.sleep(1)
            editing.append(tt)
            old=tt.get()
            # new=re.sub(R, '',old)
            new=re.sub(R, u"{{subst:"+template,old)
            if old != new:
                tt.put(new, u'[[User:Muro Bot/Comandos|Bot]]: Sustituyendo plantilla: {{'+template+u'}}')
            while tt in editing:
                editing.remove(tt)
        except:
            continue
 
def replaceplantillasext(linkshere,template,texto):
    R=escape(template)
    R='\{\{ *(?:(?:[Pp]lantilla|[Tt]emplate): *)?['+R[0].upper()+R[0].lower()+']'+R[1:]+' *\}\}'
    tch=wikipedia.Page(wikipedia.getSite(), linkshere)
    if tch.namespace() == 10:
      for tit in getincrefs(linkshere):
        tt=wikipedia.Page(wikipedia.getSite(), tit)
        wikipedia.output (u'Buscando {{'+template+u'}} en [['+tit+u']] para reemplazarla por «'+texto+u'»')
        try:
            while tt in editing:
                time.sleep(1)
            editing.append(tt)
            old=tt.get()
            # new=re.sub(R, '',old)
            new=re.sub(R, texto,old)
            #wikipedia.output (new, old)
            if old != new:
                wikipedia.output (u'Reemplazando {{'+template+u'}} por «'+texto+u'» en [['+tit+u']]')
                tt.put(new, u'[[User:Muro Bot/Comandos|Bot]]: Reemplazando {{'+template+u'}} por «'+texto+u'»')
            while tt in editing:
                editing.remove(tt)
        except wikipedia.NoPage:
            continue
        except wikipedia.SpamfilterError:
            trazar(u'Bloqueo por Spam Filter en [['+tit+u']]')            
            wikipedia.output (u'¡ATENCIÓN! Bloqueo por Spam Filter en [['+tit+u']]')
            continue
        except wikipedia.IsRedirectPage:
            trazar(u'[['+tit+u']] es una redirección')
            wikipedia.output (u'¡ATENCIÓN! [['+tit+u']] es una redirección')
            continue
        except wikipedia.LockedPage:
            trazar(u'[['+tit+u']] está protegida')            
            wikipedia.output (u'¡ATENCIÓN! [['+tit+u']] está protegida')
            continue

    else:
      for tit in getrefs(linkshere):
        tt=wikipedia.Page(wikipedia.getSite(), tit)
        wikipedia.output (u'Buscando {{'+template+u'}} en [['+tit+u']] para reemplazarla por «'+texto+u'»')
        try:
            while tt in editing:
                time.sleep(1)
            editing.append(tt)
            old=tt.get()
            # new=re.sub(R, '',old)
            new=re.sub(R, texto,old)
            #wikipedia.output (new, old)
            if old != new:
                wikipedia.output (u'Reemplazando {{'+template+u'}} por «'+texto+u'» en [['+tit+u']]')
                tt.put(new, u'[[User:Muro Bot/Comandos|Bot]]: Reemplazando {{'+template+u'}} por «'+texto+u'»')
            while tt in editing:
                editing.remove(tt)
        except wikipedia.NoPage:
            continue
        except wikipedia.SpamfilterError:
            trazar(u'Bloqueo por Spam Filter en [['+tit+u']]')            
            wikipedia.output (u'¡ATENCIÓN! Bloqueo por Spam Filter en [['+tit+u']]')
            continue
        except wikipedia.IsRedirectPage:
            trazar(u'[['+tit+u']] es una redirección')
            wikipedia.output (u'¡ATENCIÓN! [['+tit+u']] es una redirección')
            continue
        except wikipedia.LockedPage:
            trazar(u'[['+tit+u']] está protegida')            
            wikipedia.output (u'¡ATENCIÓN! [['+tit+u']] está protegida')
            continue
 
 
def replaceplantillas(template,texto):
  tl='Template:'+template
  replaceplantillasext(tl,template,texto)
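# Usage sketch (values illustrative): replace a parameterless template call
# with literal wikitext everywhere it is transcluded:
#   replaceplantillas(u'Plantilla vieja', u'texto de reemplazo')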
 
 
# import wikipedia, editarticle
"""
Script to remove links that are being or have been spammed.
Usage:
 
spamremove.py spammedsite.com
 
It will use Special:Linksearch to find the pages on the wiki that link to
that site, then for each page make a proposed change consisting of removing
all the lines where that url occurs. You can choose to:
* accept the changes as proposed
* edit the page yourself to remove the offending link
* not change the page in question
 
Command line options:
* -automatic: Do not ask, but remove the lines automatically. Be very careful
              in using this option!
 
"""
__version__ = '$Id: spamremove.py 3998 2007-08-07 20:28:27Z wikipedian $'
 
 
def retiraspam (spamSite):
    #### PARAMETERS ####
    # automatic = False
    automatic = True    
    controllednamespaces=[0]
 
    msg = {
        'de': u'Entferne in Spam-Whitelist eingetragenen Weblink auf %s',
        'en': u'Removing links to spammed site %s',
        'es': u'[[User:Muro Bot/Comandos|Bot]]: Retirando [[WP:EE|enlaces externos]] a %s',
        'nl': u'Links naar gespamde site %s verwijderd',
    }
 
    mysite = wikipedia.getSite()
    pages = list(set(mysite.linksearch(spamSite)))
    wikipedia.getall(mysite, pages)
    for p in pages:
        text = p.get()
        #if not spamSite in text:
        if not spamSite in text or not p.namespace() in controllednamespaces:
            #wikipedia.output(u"Pasando pg: no está en el namespace principal o no existe enlace.")
            continue
        # Show the title of the page we're working on.
        # Highlight the title in purple.
        wikipedia.output(u"Revisando enlaces de %s" % p.title())
        lines = text.split('\n')
        newpage = []
        lastok = ""
        for line in lines:
            if spamSite in line:
                if lastok:
                    wikipedia.output(lastok)
                wikipedia.output(u'\03{lightred}%s\03{default}' % line)
                lastok = None
            else:
                newpage.append(line)
                if line.strip():
                    if lastok is None:
                        wikipedia.output(line)
                    lastok = line
        if automatic:
            answer = "y"
        else:
            answer = wikipedia.inputChoice(u'\nDelete the red lines?',  ['yes', 'no', 'edit'], ['y', 'N', 'e'], 'n')
        if answer == "n":
            continue
        elif answer == "e":
            editor = editarticle.TextEditor()
            newtext = editor.edit(text, highlight = spamSite, jumpIndex = text.find(spamSite))
        else:
            newtext = "\n".join(newpage)
        if newtext != text:
            p.put(newtext, wikipedia.translate(mysite, msg) % spamSite)
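# Usage sketch (domain illustrative): in the controlled namespaces, delete
# every line containing a link to the spammed site:
#   retiraspam('spam.example.com')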
 
 
 
## Run the live bot
hechas=[]
def reencola():
  while not salidaprograma:
    sleep(43200)
    wikipedia.output (u'Reencolando tareas periódicas')
    for chuleta in hechas[:]: # iterate over a copy; the loop removes items from hechas
      com=chuleta.split('|')[0]
      try:
        if not com.lower() in [u'boletín', 'boletin']:
          hechas.remove(chuleta)
      except:
        continue
 
 
def RUN():
    #wikipedia.output (wikipedia.getSite().loggedin())
    cmd=wikipedia.Page(wikipedia.getSite(), u'User:Muro Bot/Comandos')
    texto=cmd.get()
    chuletas=texto.split('{{')
    chuletas.remove(chuletas[0])
    newCatTitle=''
    chuletas=[chuleta for chuleta in chuletas if chuleta not in hechas] # filter without mutating the list mid-iteration
    for chuleta in chuletas:
 
        #wikipedia.output (hechas)
        #wikipedia.output ([chuleta])
        if not chuleta in hechas:
            com=chuleta.split('|')[0]
            try:
                    if com.lower() in ['trasladar cat', 'trasladar_cat']:
                        oldCatTitle=chuleta.split('|')[1]
                        newCatTitle=chuleta.split('|')[2].split('}}')[0]
                        bot = CategoryMoveRobot(oldCatTitle, newCatTitle)
                        thread.start_new_thread(bot.run, ())
                    elif com.lower() in ['retirar cat', 'retirar_cat']:
                        oldCatTitle=chuleta.split('|')[1].split('}}')[0]
                        bot = CategoryRemoveRobot(oldCatTitle)
                        thread.start_new_thread(bot.run, ())
                    elif com.lower() in ['retirar imagen', 'retirar_imagen']:
                        imgn=chuleta.split('|')[1].split('}}')[0]
                        thread.start_new_thread(retirar_imagen, (imgn,))
                    elif com.lower() in ['reemplazar imagen', 'reemplazar_imagen']:
                        img1=chuleta.split('|')[1]
                        img2=chuleta.split('|')[2].split('}}')[0]
                        thread.start_new_thread(reemplazar_imagen, (img1, img2))
                    elif com.lower() in [u'boletín', 'boletin']:
                        msgP=chuleta.split('|')[1]
                        userSP=chuleta.split('|')[2].split('}}')[0]
                        thread.start_new_thread(boletin, (msgP, userSP))
                    elif com.lower()=='cambiar':
                        viejo=chuleta.split('|')[1]
                        nuevo=chuleta.split('|')[2].split('}}')[0]
                        thread.start_new_thread(cambiar, (viejo, nuevo))
                    elif com.lower()== 'redund':
                        art=chuleta.split('|')[1].split('}}')[0]
                        thread.start_new_thread(redund, (art, ))
                    elif com.lower()== 'redundcat':
                        cate=chuleta.split('|')[1].split('}}')[0]
                        thread.start_new_thread(feeder, (cate, ))
                    elif com.lower()== 'esbozode':
                        old=chuleta.split('|')[1]
                        new=chuleta.split('|')[2].split('}}')[0]
                        thread.start_new_thread(cambiaparametros, (old, new))
                    elif com.lower()== 'cambiarplantilla':
                        old=chuleta.split('|')[1]
                        new=chuleta.split('|')[2].split('}}')[0]
                        thread.start_new_thread(moveplantilla, (old, new))
                    elif com.lower() in ('retirar plantilla', 'retirar_plantilla'):
                        temp=chuleta.split('|')[1].split('}}')[0]
                        thread.start_new_thread(retiraplantillas, (temp, ))
                    elif com.lower() in ('subst plantilla', 'subst_plantilla'):
                        temp=chuleta.split('|')[1].split('}}')[0]
                        thread.start_new_thread(sustituyeplantillas, (temp, ))
                    elif com.lower()== 'replaceplantillaext':
                        linkhere=chuleta.split('|')[1]
                        old=chuleta.split('|')[2]
                        new=chuleta.split('|')[3].split('}}')[0]
                        thread.start_new_thread(replaceplantillasext, (linkhere,old, new))
                    elif com.lower()== 'replaceplantilla':
                        old=chuleta.split('|')[1]
                        new=chuleta.split('|')[2].split('}}')[0]
                        thread.start_new_thread(replaceplantillas, (old, new))
                    elif com.lower() in ('retirar enlace', 'retirar_enlace'):
                        temp=chuleta.split('|')[1].split('}}')[0]
                        thread.start_new_thread(retiraspam, (temp, ))
                    #elif com.lower() in ('arregla redirecciones', 'arregla_redirecciones'):
                    #   namespace=-1 # including all namespaces
                    #   gen = RedirectGenerator(xmlFilename, namespace, restart)
                    #   bot = RedirectRobot('double', gen)
                    #   thread.start_new_thread(bot.run, ())
 
                    #elif com.lower() in ('desconectar'):
                    #   salidaprograma=True
                    hechas.append(chuleta)
            except:
                continue
 
    chops=texto.split('<texto fijo>')
    chops.remove(chops[0])
    importantes=[]
    for chop in chops:
        important=chop.split('</texto fijo>')[0]
        importantes.append(important)
    pendientes=[]
    for importante in importantes:
        if importante in hechas:
            wikipedia.output ("Skipping "+importante+" because we already did it!")
        else:
            pendientes.append(importante)
    importantes=pendientes # filtered copy; removing during iteration would skip items
    for importante in importantes:
        wikipedia.output (importante)
        wikipedia.output (hechas)
        hechas.append(importante)
        cat=importante.split('<cat>')[1].split('</cat>')[0]
        texto=importante.split('<texto>')[1].split('</texto>')[0]
        thread.start_new_thread(catstring, (cat, texto))
 
thread.start_new_thread(reencola,())
while not salidaprograma:
    #wikipedia.output (time.time())
    RUN()
    sleep(30)
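# The loop above polls [[User:Muro Bot/Comandos]] every 30 seconds. A command is
# any {{...}} call found on that page; the lines below sketch the syntax RUN()
# parses (argument values illustrative):
#   {{trasladar cat|Categoría vieja|Categoría nueva}}
#   {{retirar imagen|Archivo.jpg}}
#   {{reemplazar imagen|Vieja.jpg|Nueva.jpg}}
#   {{boletín|Página del mensaje|Página de suscriptores}}
#   {{cambiar|Enlace viejo|Enlace nuevo}}
#   <texto fijo><cat>Alguna categoría</cat><texto>{{Alguna plantilla}}</texto></texto fijo>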